
[Koha-cvs] koha/C4 Search.pm


From: paul poulain
Subject: [Koha-cvs] koha/C4 Search.pm
Date: Fri, 27 Apr 2007 19:57:12 +0000

CVSROOT:        /sources/koha
Module name:    koha
Changes by:     paul poulain <tipaul>   07/04/27 19:57:12

Modified files:
        C4             : Search.pm 

Log message:
        koha NZ speed improvement for title ordering

CVSWeb URLs:
http://cvs.savannah.gnu.org/viewcvs/koha/C4/Search.pm?cvsroot=koha&r1=1.131&r2=1.132

Patches:
Index: Search.pm
===================================================================
RCS file: /sources/koha/koha/C4/Search.pm,v
retrieving revision 1.131
retrieving revision 1.132
diff -u -b -r1.131 -r1.132
--- Search.pm   25 Apr 2007 16:26:42 -0000      1.131
+++ Search.pm   27 Apr 2007 19:57:12 -0000      1.132
@@ -25,7 +25,7 @@
 use vars qw($VERSION @ISA @EXPORT @EXPORT_OK %EXPORT_TAGS);
 
 # set the version for version checking
-$VERSION = do { my @v = '$Revision: 1.131 $' =~ /\d+/g;
+$VERSION = do { my @v = '$Revision: 1.132 $' =~ /\d+/g;
     shift(@v) . "." . join( "_", map { sprintf "%03d", $_ } @v );
 };
 
@@ -1065,7 +1065,7 @@
     my $result = NZanalyse($koha_query);
 #     use Data::Dumper;
 #     warn "=========="address@hidden;
-    return (undef,NZorder($result,@$sort_by_ref[0]),undef);
+    return (undef,NZorder($result,@$sort_by_ref[0],$results_per_page,$offset),undef);
 }
 
 =item
@@ -1204,9 +1204,11 @@
 }
 
 sub NZorder {
-    my ($biblionumbers, $ordering) = @_;
+    my ($biblionumbers, $ordering,$results_per_page,$offset) = @_;
     # order title asc by default
     $ordering = '1=36 <i' unless $ordering;
+    $results_per_page=20 unless $results_per_page;
+    $offset = 0 unless $offset;
     my $dbh = C4::Context->dbh;
     #
     # order by POPULARITY
@@ -1347,23 +1349,32 @@
     } else { 
 # the title is in the biblionumbers string, so we just need to build a hash, sort it and return
         my %result;
+#         splice(@X,$results_per_page*(1+$offset));
+#         splice(@X,0,$results_per_page*$offset);
         foreach (split /,/,$biblionumbers) {
             my ($biblionumber,$title) = split /;/,$_;
             # hint : the result is sorted by title.biblionumber because we can have X biblios with the same title
             # and we don't want to get only 1 result for each of them !!!
-            $result{$title.$biblionumber}=GetMarcBiblio($biblionumber);
+            # hint & speed improvement : we can order without reading the record
+            # so order, and read records only for the requested page !
+            $result{$title.$biblionumber}=$biblionumber;
         }
         # sort the hash and return the same structure as GetRecords (Zebra querying)
         my $result_hash;
         my $numbers=0;
         if ($ordering eq '1=36 <i') { # sort by title desc
             foreach my $key (sort (keys %result)) {
-                $result_hash->{'RECORDS'}[$numbers++] = $result{$key}->as_usmarc();
+                $result_hash->{'RECORDS'}[$numbers++] = $result{$key};
             }
         } else { # sort by title ASC
             foreach my $key (sort { $a <=> $b } (keys %result)) {
-                $result_hash->{'RECORDS'}[$numbers++] = $result{$key}->as_usmarc();
+                $result_hash->{'RECORDS'}[$numbers++] = $result{$key};
+            }
             }
+        # for the requested page, replace biblionumber by the complete record
+        # speed improvement : avoid reading too much things
+        for (my $counter=$offset;$counter<=$offset+$results_per_page;$counter++) {
+            $result_hash->{'RECORDS'}[$counter] = GetMarcBiblio($result_hash->{'RECORDS'}[$counter])->as_usmarc;
         }
         my $finalresult=();
         $result_hash->{'hits'} = $numbers;
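
For readers skimming the patch, the gist of the change: NZorder() now keeps bare biblionumbers in the sorted hash and calls GetMarcBiblio()/as_usmarc only for the entries on the requested page, instead of reading every MARC record before sorting. Below is a minimal standalone Perl sketch of that order-first, fetch-later pattern; the sample input string, the page values and the fetch_record() helper are invented for illustration and are not part of the Koha API.

#!/usr/bin/perl
# Standalone sketch of the "order first, read records later" idea in this commit.
# fetch_record() is a hypothetical stand-in for the expensive GetMarcBiblio()
# + as_usmarc() call in the real code.
use strict;
use warnings;

# NZanalyse-style result string: comma-separated "biblionumber;title" pairs
my $biblionumbers    = "3;Dune,1;Carmilla,2;Beloved,4;Emma";
my $results_per_page = 2;    # the patch defaults this to 20 in Koha
my $offset           = 0;    # first page

sub fetch_record {           # hypothetical expensive record read
    my ($biblionumber) = @_;
    return "full record for biblio $biblionumber";
}

# 1) build title.biblionumber => biblionumber, without touching the records
my %result;
foreach ( split /,/, $biblionumbers ) {
    my ( $biblionumber, $title ) = split /;/, $_;
    $result{ $title . $biblionumber } = $biblionumber;
}

# 2) sort on the keys alone; the list still holds only biblionumbers
my @records = map { $result{$_} } sort keys %result;

# 3) replace biblionumbers with full records for the requested page only
my $last = $offset + $results_per_page - 1;
$last = $#records if $last > $#records;
for my $counter ( $offset .. $last ) {
    $records[$counter] = fetch_record( $records[$counter] );
}

print "$_\n" for @records[ $offset .. $last ];

In the patched NZorder() the same idea appears as the new $results_per_page and $offset arguments (defaulting to 20 and 0) and the final for loop that inflates only the slice of $result_hash->{'RECORDS'} belonging to the requested page, so a result set of a few thousand titles now costs roughly one page worth of MARC reads rather than thousands.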