#!/usr/bin/perl
#
# find_sa_mmap.cgi — case-insensitive full-text search over a pre-built
# suffix-array index, using mmap(2) instead of seek/read for the index
# and corpus files.  Prints an HTML results page plus resource metrics.
#
# Provenance: reconstructed from git patch 4623c35ed189c8ed6b9ced043a9df4e7f9558ba0
#   ("mmap.", Sadeep Madurange, Sun 8 Mar 2026),
#   new file _site/cgi-bin/find_sa_mmap.cgi (212 lines).
#
# On-disk layout this script assumes:
#   sa.bin       — suffix array: packed 32-bit LE unsigned offsets ("L"),
#                  sorted by the lowercased corpus suffix they point at
#   corpus.bin   — raw concatenated article text (lowercased for indexing)
#   file_map.dat — Storable arrayref of { start, end, path, title } records
#                  mapping corpus byte ranges back to source files

use strict;
use warnings;
use Storable qw(retrieve);
use Encode qw(decode_utf8);
use HTML::Escape qw(escape_html);
use Time::HiRes qw(gettimeofday tv_interval);
use BSD::Resource;
use Sys::Mmap;

my $sa_file  = 'sa.bin';       # Suffix Array index
my $cp_file  = 'corpus.bin';   # Raw text corpus
my $map_file = 'file_map.dat'; # File metadata

# 1. Start Benchmark Timer
my $start_time = [gettimeofday];
my $files_read = 0;            # Track IO Activity

# Decode the search text from QUERY_STRING.
# FIX: the query arrives application/x-www-form-urlencoded, so '+' and
# %XX escapes must be undone BEFORE UTF-8 decoding — the original
# compared the raw encoded bytes against the index, so multi-word and
# non-ASCII queries could never match.
my $search_text = '';
if (($ENV{QUERY_STRING} || '') =~ /^q=([^&]*)/) {
    my $raw = $1 // '';
    $raw =~ tr/+/ /;                               # form encoding: '+' means space
    $raw =~ s/%([0-9A-Fa-f]{2})/chr hex $1/ge;     # percent-decode to raw bytes
    $search_text = decode_utf8($raw);
    $search_text =~ s/\P{Print}//g;                # strip control/unprintable chars
    $search_text = substr($search_text, 0, 64);    # cap query length
    $search_text =~ s/^\s+|\s+$//g;                # trim surrounding whitespace
}

# The index is built over lowercased text, so search lowercased too.
my $query     = lc($search_text);
my $query_len = length($query);
my @results;

if ($query_len >= 3 && -f $sa_file && -f $cp_file) {
    open(my $fh_sa, '<', $sa_file) or die $!;
    open(my $fh_cp, '<', $cp_file) or die $!;
    binmode($fh_sa);
    binmode($fh_cp);

    # Memory map files: length 0 maps the whole file; the kernel pages in
    # only the bytes the binary search actually touches.
    my ($sa_mapped, $cp_mapped);
    mmap($sa_mapped, 0, PROT_READ, MAP_SHARED, $fh_sa) or die "Could not map SA: $!";
    mmap($cp_mapped, 0, PROT_READ, MAP_SHARED, $fh_cp) or die "Could not map Corpus: $!";

    my $file_map = retrieve($map_file);
    $files_read += 3;
    my $total_suffixes = (-s $sa_file) / 4;    # 4 bytes per packed "L" offset

    # Range Search, phase 1: binary-search the LEFT boundary — the first
    # suffix whose $query_len-byte prefix equals the query.
    my ($low, $high) = (0, $total_suffixes - 1);
    my $first_hit = -1;
    while ($low <= $high) {
        my $mid  = int(($low + $high) / 2);
        my $off  = unpack("L", substr($sa_mapped, $mid * 4, 4));
        my $text = substr($cp_mapped, $off, $query_len);
        my $cmp  = $text cmp $query;
        if ($cmp >= 0) {
            $first_hit = $mid if $cmp == 0;    # candidate; keep searching left
            $high = $mid - 1;
        } else {
            $low = $mid + 1;
        }
    }

    if ($first_hit != -1) {
        # Phase 2: binary-search the RIGHT boundary of the matching run.
        ($low, $high) = ($first_hit, $total_suffixes - 1);
        my $last_hit = $first_hit;
        while ($low <= $high) {
            my $mid  = int(($low + $high) / 2);
            my $off  = unpack("L", substr($sa_mapped, $mid * 4, 4));
            my $text = substr($cp_mapped, $off, $query_len);
            if (($text cmp $query) <= 0) {
                $last_hit = $mid if $text eq $query;
                $low = $mid + 1;
            } else {
                $high = $mid - 1;
            }
        }

        # Collect unique file results (one result per file, first hit wins).
        my %seen;
        HIT: for my $i ($first_hit .. $last_hit) {
            my $offset = unpack("L", substr($sa_mapped, $i * 4, 4));
            foreach my $m (@$file_map) {
                if ($offset >= $m->{start} && $offset < $m->{end}) {
                    if (!$seen{$m->{path}}++) {
                        # Capture more than 50 chars so there is slack for trimming:
                        # up to 30 bytes of left context, 120 bytes total.
                        my $snip_start = ($offset - 30 < $m->{start}) ? $m->{start} : $offset - 30;
                        my $max_len    = $m->{end} - $snip_start;
                        my $read_len   = ($max_len > 120) ? 120 : $max_len;
                        my $raw_snip   = substr($cp_mapped, $snip_start, $read_len);

                        # FIX: FB_QUIET destructively consumes its source and
                        # stops at the first malformed byte — and a snippet
                        # window routinely starts mid-UTF-8-sequence, which
                        # made FB_QUIET return an empty string.  FB_DEFAULT
                        # substitutes U+FFFD and decodes the whole window.
                        my $snippet = decode_utf8($raw_snip, Encode::FB_DEFAULT);
                        $snippet =~ s/\s+/ /g;    # Normalize whitespace

                        # Trim start: drop a leading partial word unless the
                        # snippet begins exactly at the article start.
                        if ($snip_start > $m->{start}) {
                            $snippet =~ s/^[^\s]*\s//;
                        }

                        # Trim end: enforce the 50-char display limit and drop
                        # a trailing partial word; remember whether text was cut.
                        my $has_more = 0;
                        if (length($snippet) > 50) {
                            $snippet = substr($snippet, 0, 50);
                            $has_more = 1 if $snippet =~ s/\s+[^\s]*$//;
                        }
                        elsif ($snip_start + $read_len < $m->{end}) {
                            # Snippet is naturally short but the article
                            # continues past the window we read.
                            $has_more = 1;
                        }

                        # Cleanup & capitalize
                        $snippet = ucfirst($snippet);
                        $snippet = escape_html($snippet) . ($has_more ? "..." : "");

                        my $clean_path = $m->{path};
                        $clean_path =~ s|^\.\./_site/||;

                        push @results, {
                            path    => $clean_path,
                            title   => $m->{title},    # FIX: stray double comma removed
                            snippet => $snippet,
                        };
                    }
                    last;    # offset belongs to exactly one file range
                }
            }
            last HIT if @results >= 1000;    # hard cap on result count
        }
    }

    # FIX: release the mappings before closing the handles (the original
    # leaked them until process exit).
    munmap($sa_mapped);
    munmap($cp_mapped);
    close($fh_sa);
    close($fh_cp);
}

# 2. Calculate Metrics
my $end_time = [gettimeofday];
my $elapsed  = tv_interval($start_time, $end_time);

my $rusage     = getrusage();
my $user_cpu   = $rusage->utime;
my $system_cpu = $rusage->stime;
my $max_rss    = $rusage->maxrss;   # NOTE(review): KB on Linux, bytes on macOS — confirm target OS

# 3. Output
# FIX: snippets/titles are decoded (wide-character) text, so STDOUT needs a
# UTF-8 encoding layer and the Content-Type should declare the charset.
binmode(STDOUT, ':encoding(UTF-8)');
print "Content-Type: text/html; charset=utf-8\n\n";

# NOTE(review): the HTML markup below was stripped from this copy of the
# patch by the extraction; tags are reconstructed minimally from the
# surviving text — verify classes/structure against the deployed template.
my $list;
if ($search_text eq '') {
    $list = qq{<div class="message">Please enter a search term above.</div>};
} elsif (@results == 0) {
    $list = qq{<div class="message">No results found for "}
          . escape_html($search_text) . qq{".</div>};
} else {
    $list = qq{<ul class="results">\n};
    for my $r (@results) {
        $list .= sprintf qq{<li><a href="/%s">%s</a> &ndash; %s</li>\n},
            escape_html($r->{path}), escape_html($r->{title} // ''), $r->{snippet};
    }
    $list .= qq{</ul>\n};
}

my $safe_search_text = escape_html($search_text);
my $year = (localtime)[5] + 1900;

print <<"HTML";
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Search</title>
</head>
<body>
<header><h1>Search</h1></header>
<form action="" method="get">
  <input type="text" name="q" value="$safe_search_text" maxlength="64">
  <input type="submit" value="Search">
</form>
$list
<div class="metrics">
  Performance Metrics:<br>
  Total Time: @{[ sprintf("%.4f", $elapsed) ]} seconds<br>
  User CPU: $user_cpu s<br>
  System CPU: $system_cpu s<br>
  Peak RAM: $max_rss KB<br>
  Files Read: $files_read (IO Activity)
</div>
<footer>&copy; $year</footer>
</body>
</html>
HTML