Diffstat (limited to 'cgi-bin')
-rw-r--r--  cgi-bin/find.cgi    | 192
-rw-r--r--  cgi-bin/indexer.pl  | 146
2 files changed, 246 insertions, 92 deletions
diff --git a/cgi-bin/find.cgi b/cgi-bin/find.cgi
index 3e5c8a2..ab066dd 100644
--- a/cgi-bin/find.cgi
+++ b/cgi-bin/find.cgi
@@ -3,16 +3,18 @@
 use strict;
 use warnings;
 use Storable qw(retrieve);
-use Encode qw(decode_utf8);
+use Encode qw(decode_utf8 encode_utf8);
+use URI::Escape qw(uri_unescape);
 use HTML::Escape qw(escape_html);
 
 # Configuration
-my $max_parallel = 50;   # max no. of parallel searches
-my $lock_timeout = 30;   # drop stale locks after this many seconds
-my $max_results = 20;    # max search results
-my $min_query_len = 3;   # min query length to avoid matching 'a', 'e'
-my $index_file = 'search_index.dat'; # index file
-my $lock_dir = '/tmp/search_locks';  # lock file directory
+my $max_parallel = 50;              # Max parallel search requests
+my $lock_timeout = 30;              # Seconds before dropping stale locks
+my $max_results = 20;               # Max search results to display
+my $sa_file = 'sa.bin';             # Suffix Array index
+my $cp_file = 'corpus.bin';         # Raw text corpus
+my $map_file = 'file_map.dat';      # File metadata
+my $lock_dir = '/tmp/search_locks'; # Semaphore directory
 
 # Concurrency control
 mkdir $lock_dir, 0777 unless -d $lock_dir;
@@ -24,78 +26,161 @@ while (my $file = readdir($dh)) {
     next unless $file =~ /\.lock$/;
     my $path = "$lock_dir/$file";
     my $mtime = (stat($path))[9] || 0;
-    ( $now - $mtime > $lock_timeout ) ? unlink($path) : $active_count++;
+    ($now - $mtime > $lock_timeout) ? unlink($path) : $active_count++;
 }
 closedir($dh);
 
-# Too many search requests
+# Template variables
+my $year = (localtime)[5] + 1900;
+my $search_text = '';
+
+# Busy check
 if ($active_count >= $max_parallel) {
     print "Content-Type: text/html\n\n";
-    render_html("<p>Server busy. Please try again in a few seconds.</p>", "", (localtime)[5]+1900);
+    render_html("<p>Server busy. Please try again in a few seconds.</p>", "", $year);
     exit;
 }
 
+# Create semaphore lock
 my $lock_file = "$lock_dir/$$.lock";
 open(my $fh_lock, '>', $lock_file);
 
-# Decode search text as utf-8, toss non-printable chars, trim
-my $search_text = '';
+# Query decoding
 if (($ENV{QUERY_STRING} || '') =~ /^q=([^&]*)/) {
-    $search_text = decode_utf8($1 // "");
+    my $raw_q = $1;
+    $raw_q =~ tr/+/ /;
+    $search_text = uri_unescape($raw_q);
+    $search_text = decode_utf8($search_text // "");
     $search_text =~ s/\P{Print}//g;
     $search_text = substr($search_text, 0, 64);
     $search_text =~ s/^\s+|\s+$//g;
 }
 
-# Pre-prepare common template variables
 my $safe_search_text = escape_html($search_text);
-my $year = (localtime)[5] + 1900;
 
 print "Content-Type: text/html\n\n";
 
-# Input validation
 if ($search_text eq '') {
     final_output("<p>Please enter a search term above.</p>");
 }
 
-if (length($search_text) < $min_query_len) {
-    final_output("<p>Search term is too short. Please enter at least $min_query_len characters.</p>");
-}
-
-if (!-f $index_file) {
-    final_output("<p>Search temporarily unavailable.</p>");
-}
-
-my $index = retrieve($index_file);
+# Binary search
 my @results;
-my $found = 0;
-
-foreach my $url (sort keys %$index) {
-    last if $found >= $max_results;
-    my $data = $index->{$url};
-
-    # Grab 80 char snippet to chop at a word boundary later
-    next unless $data->{c} =~ /(.{0,40})(\Q$search_text\E)(.{0,40})/is;
-    my ($before, $actual, $after) = ($1, $2, $3);
-    $found++;
-
-    # Chop at 25 or word boundary
-    $after =~ s/\s\S*$// if length($after) > 25;
-    $before =~ s/^.*?\s// if length($before) > 25;
-
-    $before = ($before =~ /\S/) ? ucfirst($before) : "";
-    $actual = ($before eq "") ? ucfirst($actual) : $actual;
-
-    my $snippet = escape_html($before) . "<b>" . escape_html($actual) . "</b>" . escape_html($after) . "...";
-
-    push @results, {
-        path    => $url,
-        title   => escape_html($data->{t}),
-        snippet => $snippet
-    };
+my $query = encode_utf8(lc($search_text));
+my $query_len = length($query);
+
+if (-f $sa_file && -f $cp_file) {
+    open(my $fh_sa, '<', $sa_file) or die $!;
+    open(my $fh_cp, '<', $cp_file) or die $!;
+    binmode($fh_sa);
+    binmode($fh_cp);
+
+    my $file_map = retrieve($map_file);
+    my $total_suffixes = (-s $sa_file) / 4;
+
+    # Find left boundary
+    my ($low, $high) = (0, $total_suffixes - 1);
+    my $first_hit = -1;
+
+    while ($low <= $high) {
+        my $mid = int(($low + $high) / 2);
+        seek($fh_sa, $mid * 4, 0);
+        read($fh_sa, my $bin_off, 4);
+        my $off = unpack("L", $bin_off);
+        seek($fh_cp, $off, 0);
+        read($fh_cp, my $text, $query_len);
+
+        my $cmp = $text cmp $query;
+        if ($cmp >= 0) {
+            $first_hit = $mid if $cmp == 0;
+            $high = $mid - 1;
+        } else {
+            $low = $mid + 1;
+        }
+    }
+
+    # Collect results if found
+    if ($first_hit != -1) {
+        my $last_hit = $first_hit;
+        ($low, $high) = ($first_hit, $total_suffixes - 1);
+
+        # Find right boundary
+        while ($low <= $high) {
+            my $mid = int(($low + $high) / 2);
+            seek($fh_sa, $mid * 4, 0);
+            read($fh_sa, my $bin_off, 4);
+            my $off = unpack("L", $bin_off);
+            seek($fh_cp, $off, 0);
+            read($fh_cp, my $text, $query_len);
+
+            if (($text cmp $query) <= 0) {
+                $last_hit = $mid if $text eq $query;
+                $low = $mid + 1;
+            } else {
+                $high = $mid - 1;
+            }
+        }
+
+        my %seen;
+        for my $i ($first_hit .. $last_hit) {
+            seek($fh_sa, $i * 4, 0);
+            read($fh_sa, my $bin_off, 4);
+            my $offset = unpack("L", $bin_off);
+
+            foreach my $m (@$file_map) {
+                if ($offset >= $m->{start} && $offset < $m->{end}) {
+                    if (!$seen{$m->{path}}++) {
+                        # Capture more than 50 chars for trimming
+                        my $snip_start = ($offset - 30 < $m->{start}) ? $m->{start} : $offset - 30;
+                        my $max_len = $m->{end} - $snip_start;
+                        my $read_len = ($max_len > 120) ? 120 : $max_len;
+                        seek($fh_cp, $snip_start, 0);
+                        read($fh_cp, my $raw_snip, $read_len);
+
+                        my $snippet = decode_utf8($raw_snip, Encode::FB_QUIET) // $raw_snip;
+                        $snippet =~ s/\s+/ /g; # Normalize whitespace
+
+                        # Trim start: partial word removal
+                        if ($snip_start > $m->{start}) {
+                            $snippet =~ s/^[^\s]*\s//;
+                        }
+
+                        # Trim end: length limit and partial word removal
+                        my $has_more = 0;
+                        if (length($snippet) > 50) {
+                            $snippet = substr($snippet, 0, 50);
+                            $has_more = 1 if $snippet =~ s/\s+[^\s]*$//;
+                        }
+                        elsif ($snip_start + $read_len < $m->{end}) {
+                            # This check handles snippets that are naturally short but
+                            # there's still more text in the article we didn't read
+                            $has_more = 1;
+                        }
+
+                        # Cleanup & capitalize
+                        $snippet = ucfirst($snippet);
+                        $snippet = escape_html($snippet) . ($has_more ? "..." : "");
+
+                        my $clean_path = $m->{path};
+                        $clean_path =~ s|^\.\./_site/||;
+
+                        push @results, {
+                            path    => $clean_path,
+                            title   => $m->{title},
+                            snippet => $snippet
+                        };
+                    }
+                    last;
+                }
+            }
+            last if scalar @results >= $max_results;
+        }
+    }
+    close($fh_sa);
+    close($fh_cp);
 }
 
-# Format results list
+# --- Formatting & Output ---
 my $list_html = "";
 if (@results == 0) {
     $list_html = "<p>No results found for \"<b>$safe_search_text</b>\".</p>";
@@ -107,12 +192,11 @@ if (@results == 0) {
 
 final_output($list_html);
 
-# Helper to ensure layout is always preserved
+# --- Helpers ---
 sub final_output {
     my ($content) = @_;
     render_html($content, $safe_search_text, $year);
-    close($fh_lock) if $fh_lock;
-    unlink($lock_file) if -f $lock_file;
+    if ($fh_lock) { close($fh_lock); unlink($lock_file); }
     exit;
 }
 
diff --git a/cgi-bin/indexer.pl b/cgi-bin/indexer.pl
index 38a918e..69f6838 100644
--- a/cgi-bin/indexer.pl
+++ b/cgi-bin/indexer.pl
@@ -2,45 +2,115 @@
 use strict;
 use warnings;
-use Storable qw(nstore);
+use File::Find;
+use Storable qw(store);
+use Encode qw(encode_utf8);
 use HTML::Entities qw(decode_entities);
+use Time::HiRes qw(gettimeofday tv_interval);
 
-# --- Configuration ---
-my $built_site_dir = '../_site/log';
-my $output_file = '../_site/cgi-bin/search_index.dat';
-my %index;
-
-print "Building search index from $built_site_dir...\n";
-
-foreach my $path (glob("$built_site_dir/*/index.html")) {
-    next unless open(my $fh, '<:utf8', $path);
-    my $html = do { local $/; <$fh> };
-    close($fh);
-
-    # Extract Title and Main Content
-    my ($title) = $html =~ m|<title>(.*?)</title>|is;
-    my ($main) = $html =~ m|<main>(.*?)</main>|is;
-    $main //= '';
-
-    # Strip HTML and clean prose
-    $main =~ s|<pre[^>]*>.*?</pre>| |gs;
-    $main =~ s|<code[^>]*>.*?</code>| |gs;
-    $main =~ s|<[^>]+>| |g;
-    $main = decode_entities($main);
-    $main =~ s|\s+| |g;
-    $main =~ s/^\s+|\s+$//g;
-
-    # Normalize path
-    my $url = $path;
-    $url =~ s|^\.\./_site/||;  # Remove local build directory
-    $url =~ s|^\.\./||;        # Remove any leading dots
-    $url =~ s|^/+||;           # Remove leading slashes
-
-    $index{$url} = {
-        t => $title || "Untitled",
-        c => $main
-    };
+my $dir = '../_site/log';
+my $cgi_dir = '../_site/cgi-bin/';
+my $corpus_file = "${cgi_dir}corpus.bin";
+my $sa_file = "${cgi_dir}sa.bin";
+my $map_file = "${cgi_dir}file_map.dat";
+
+my %excluded_files = (
+    'index.html' => 1, # /log/index.html
+);
+
+# Start timing
+my $t0 = [gettimeofday];
+
+my $corpus = "";
+my @file_map;
+
+print "Building corpus...\n";
+
+find({
+    wanted => sub {
+        # Only index index.html files
+        return unless -f $_ && $_ eq 'index.html';
+
+        my $rel_path = $File::Find::name;
+        $rel_path =~ s|^\Q$dir\E/?||;
+        return if $excluded_files{$rel_path};
+
+        if (open my $fh, '<:encoding(UTF-8)', $_) {
+            my $content = do { local $/; <$fh> };
+            close $fh;
+
+            my ($title) = $content =~ m|<title>(.*?)</title>|is;
+            $title //= (split('/', $File::Find::name))[-2]; # Fallback to folder name
+            $title =~ s/^\s+|\s+$//g;
+
+            # Extract content from <main> or use whole file
+            my ($text) = $content =~ m|<main>(.*?)</main>|is;
+            $text //= $content;
+
+            # Strip tags and normalize whitespace
+            $text =~ s|<pre[^>]*>.*?</pre>| |gs;
+            $text =~ s|<code[^>]*>.*?</code>| |gs;
+            $text =~ s|<[^>]+>| |g;
+            $text = decode_entities($text);
+            $text =~ s|\s+| |g;
+            $text =~ s/^\s+|\s+$//g;
+
+            # CRITICAL: Convert to lowercase and then to raw bytes
+            # This ensures length() and substr() work on byte offsets for seek()
+            my $raw_entry = encode_utf8(lc($text) . "\0");
+
+            my $start = length($corpus);
+            $corpus .= $raw_entry;
+
+            push @file_map, {
+                start => $start,
+                end   => length($corpus),
+                title => $title,
+                path  => $File::Find::name
+            };
+        }
+    },
+    no_chdir => 0,
+}, $dir);
+
+print "Sorting suffixes...\n";
+
+# Initialize the array of indices
+my @sa = 0 .. (length($corpus) - 1);
+
+# Use a block that forces byte-level comparison
+{
+    use bytes;
+    @sa = sort {
+        # First 64 bytes check (fast path)
+        (substr($corpus, $a, 64) cmp substr($corpus, $b, 64)) ||
+        # Full string fallback (required for correctness)
+        (substr($corpus, $a) cmp substr($corpus, $b))
+    } @sa;
 }
-nstore(\%index, $output_file);
-printf("Index complete: %d files (%.2f KB)\n", scalar(keys %index), (-s $output_file) / 1024);
+print "Writing index files to disk...\n";
+
+open my $cfh, '>', $corpus_file or die "Cannot write $corpus_file: $!";
+binmode($cfh); # Raw byte mode
+print $cfh $corpus;
+close $cfh;
+
+open my $sfh, '>', $sa_file or die "Cannot write $sa_file: $!";
+binmode($sfh);
+# Pack as 32-bit unsigned integers (standard 'L')
+print $sfh pack("L*", @sa);
+close $sfh;
+
+store \@file_map, $map_file;
+
+my $elapsed = tv_interval($t0);
+my $c_size = -s $corpus_file;
+my $s_size = -s $sa_file;
+
+printf "\nIndexing Complete!\n";
+printf "Total Time: %.4f seconds\n", $elapsed;
+printf "Corpus Size: %.2f KB\n", $c_size / 1024;
+printf "Suffix Array: %.2f KB\n", $s_size / 1024;
+printf "Files Processed: %d\n", scalar(@file_map);
