use C4::Charset;
use C4::Linker;
use C4::OAI::Sets;
+use C4::Debug;
use Koha::Cache;
use Koha::Authority::Types;
use Koha::Acquisition::Currencies;
+use Koha::SearchEngine;
use vars qw(@ISA @EXPORT);
+use vars qw($debug $cgi_debug);
BEGIN {
=head2 ModZebra
- ModZebra( $biblionumber, $op, $server );
+ ModZebra( $biblionumber, $op, $server, $record );
$biblionumber is the biblionumber we want to index
-$op is specialUpdate or delete, and is used to know what we want to do
+$op is specialUpdate or recordDelete, and is used to know what we want to do
$server is the server that we want to update
+$record is the updated MARC record if it's available. If it's not supplied
+and is needed, it'll be loaded from the database.
+
=cut
sub ModZebra {
###Accepts a $server variable thus we can use it for biblios authorities or other zebra dbs
- my ( $biblionumber, $op, $server ) = @_;
- my $dbh = C4::Context->dbh;
+ my ( $biblionumber, $op, $server, $record ) = @_;
+ $debug && warn "ModZebra: update requested for: $biblionumber $op $server\n";
+ # Elasticsearch path: index or delete the record immediately rather than
+ # queueing it in the zebraqueue table.
+ if ( C4::Context->preference('SearchEngine') eq 'Elasticsearch' ) {
- # true ModZebra commented until indexdata fixes zebraDB crashes (it seems they occur on multiple updates
- # at the same time
- # replaced by a zebraqueue table, that is filled with ModZebra to run.
- # the table is emptied by rebuild_zebra.pl script (using the -z switch)
+ # TODO abstract to a standard API that'll work for whatever
+ require Koha::ElasticSearch::Indexer;
+ my $indexer = Koha::ElasticSearch::Indexer->new(
+ {
+ index => $server eq 'biblioserver'
+ ? $Koha::SearchEngine::BIBLIOS_INDEX
+ : $Koha::SearchEngine::AUTHORITIES_INDEX
+ }
+ );
+ if ( $op eq 'specialUpdate' ) {
+ unless ($record) {
+ # NOTE(review): GetMarcBiblio loads biblio records only — confirm that
+ # authorityserver updates always pass $record in, or load it appropriately.
+ $record = GetMarcBiblio($biblionumber, 1);
+ }
+ $indexer->update_index_background( [$biblionumber], [$record] );
+ }
+ elsif ( $op eq 'recordDelete' ) {
+ $indexer->delete_index_background( [$biblionumber] );
+ }
+ else {
+ # NOTE(review): confirm Carp is imported in this module so croak() resolves.
+ croak "ModZebra called with unknown operation: $op";
+ }
+ } else {
+ my $dbh = C4::Context->dbh;
- my $check_sql = "SELECT COUNT(*) FROM zebraqueue
- WHERE server = ?
- AND biblio_auth_number = ?
- AND operation = ?
- AND done = 0";
- my $check_sth = $dbh->prepare_cached($check_sql);
- $check_sth->execute( $server, $biblionumber, $op );
- my ($count) = $check_sth->fetchrow_array;
- $check_sth->finish();
- if ( $count == 0 ) {
- my $sth = $dbh->prepare("INSERT INTO zebraqueue (biblio_auth_number,server,operation) VALUES(?,?,?)");
- $sth->execute( $biblionumber, $server, $op );
- $sth->finish;
+ # true ModZebra commented until indexdata fixes zebraDB crashes (it seems they occur on multiple updates
+ # at the same time
+ # replaced by a zebraqueue table, that is filled with ModZebra to run.
+ # the table is emptied by rebuild_zebra.pl script (using the -z switch)
+ my $check_sql = "SELECT COUNT(*) FROM zebraqueue
+ WHERE server = ?
+ AND biblio_auth_number = ?
+ AND operation = ?
+ AND done = 0";
+ my $check_sth = $dbh->prepare_cached($check_sql);
+ $check_sth->execute( $server, $biblionumber, $op );
+ my ($count) = $check_sth->fetchrow_array;
+ $check_sth->finish();
+ if ( $count == 0 ) {
+ my $sth = $dbh->prepare("INSERT INTO zebraqueue (biblio_auth_number,server,operation) VALUES(?,?,?)");
+ $sth->execute( $biblionumber, $server, $op );
+ $sth->finish;
+ }
 }
}
$sth = $dbh->prepare("UPDATE biblioitems SET marc=?,marcxml=? WHERE biblionumber=?");
$sth->execute( $record->as_usmarc(), $record->as_xml_record($encoding), $biblionumber );
$sth->finish;
- if ( C4::Context->preference('SearchEngine') eq 'ElasticSearch' ) {
-# shift to its on sub, so it can do it realtime or queue
- can_load( modules => { 'Koha::ElasticSearch::Indexer' => undef } );
- # need to get this from syspref probably biblio/authority for index
- my $indexer = Koha::ElasticSearch::Indexer->new();
- my $records = [$record];
- $indexer->update_index([$biblionumber], $records);
- }
- ModZebra( $biblionumber, "specialUpdate", "biblioserver" );
+ ModZebra( $biblionumber, "specialUpdate", "biblioserver", $record );
return $biblionumber;
}
=head1 SYNOPSIS
- my $indexer = Koha::ElasticSearch::Indexer->new({ index => 'biblios' });
- $indexer->delete_index();
+ my $indexer = Koha::ElasticSearch::Indexer->new(
+ { index => $Koha::SearchEngine::BIBLIOS_INDEX } );
+ $indexer->drop_index();
$indexer->update_index(\@biblionumbers, \@records);
=head1 FUNCTIONS
sub update_index {
my ($self, $biblionums, $records) = @_;
+ # TODO should have a separate path for dealing with a large number
+ # of records at once where we use the bulk update functions in ES.
if ($biblionums) {
$self->_sanitise_records($biblionums, $records);
}
+ # FIXME(review): this call recurses into update_index itself, which looks
+ # like unbounded recursion — confirm it should instead delegate to the
+ # store/engine implementation (context line; cannot be changed in this hunk).
$self->update_index(@_);
}
-=head2 $indexer->delete_index();
+=head2 $indexer->delete_index($biblionums)
-Deletes the index from the elasticsearch server. Calling C<update_index>
-after this will recreate it again.
+C<$biblionums> is an arrayref of biblionumbers to delete from the index.
=cut
sub delete_index {
+ my ($self, $biblionums) = @_;
+
+ # Lazily build the Catmandu store on first use.
+ if ( !$self->store ) {
+ my $params = $self->get_elasticsearch_params();
+ $self->store(
+ Catmandu::Store::ElasticSearch->new(
+ %$params,
+ index_settings => $self->get_elasticsearch_settings(),
+ index_mappings => $self->get_elasticsearch_mappings(),
+ # Debug tracing off by default; flip to 1 locally when
+ # diagnosing ES calls (was left enabled).
+ trace_calls => 0,
+ )
+ );
+ }
+ $self->store->bag->delete($_) foreach @$biblionums;
+ $self->store->bag->commit;
+}
+
+=head2 $indexer->delete_index_background($biblionums)
+
+Identical to L</delete_index> for now; intended to eventually return
+immediately and perform the deletion in a background process (not yet
+implemented — currently runs synchronously).
+
+=cut
+
+# TODO implement in the future
+
+sub delete_index_background {
+ my $self = shift;
+ # Currently a synchronous passthrough; real backgrounding is a TODO.
+ $self->delete_index(@_);
+}
+=head2 $indexer->drop_index();
+
+Drops the index from the elasticsearch server. Calling C<update_index>
+after this will recreate it again.
+
+=cut
+
+sub drop_index {
my ($self) = @_;
if (!$self->store) {
+-- For now this form of the table is kept because it's easier to edit. Once
+-- an interface exists, we can use the real form directly.
DROP TABLE IF EXISTS elasticsearch_mapping;
DROP TABLE IF EXISTS search_marc_to_field;
DROP TABLE IF EXISTS search_field;
INSERT INTO `elasticsearch_mapping` (`indexname`, `mapping`, `facet`, `type`, `marc21`, `unimarc`, `normarc`) VALUES ('biblios','an',FALSE,'number',NULL,'6179',NULL);
INSERT INTO `elasticsearch_mapping` (`indexname`, `mapping`, `facet`, `type`, `marc21`, `unimarc`, `normarc`) VALUES ('biblios','an',FALSE,'number',NULL,'6209',NULL);
INSERT INTO `elasticsearch_mapping` (`indexname`, `mapping`, `facet`, `type`, `marc21`, `unimarc`, `normarc`) VALUES ('biblios','an',FALSE,'number',NULL,'6219',NULL);
+INSERT INTO `elasticsearch_mapping` (`indexname`, `mapping`, `facet`, `type`, `marc21`, `unimarc`, `normarc`) VALUES ('biblios','Host-Item-Number',FALSE,'number','7739','4619','7739');
-- Authorities: incomplete
INSERT INTO `elasticsearch_mapping` (`indexname`, `mapping`, `facet`, `type`, `marc21`, `unimarc`, `normarc`) VALUES ('authorities','Local-number',FALSE,'string','001',NULL,'001');