diff options
| author | Sadeep Madurange <sadeep@asciimx.com> | 2026-01-03 16:07:15 +0800 |
|---|---|---|
| committer | Sadeep Madurange <sadeep@asciimx.com> | 2026-01-03 16:07:15 +0800 |
| commit | d3aff7d14e66f19a8e4e9a3315494ad612ccc1f5 (patch) | |
| tree | 5c528f296a1013140f9bc7f109c1ba738ea4c1e0 /cgi-bin/_site | |
| parent | 15205d0cf770058b59be07e00f6dbc6523b9cede (diff) | |
| download | www-d3aff7d14e66f19a8e4e9a3315494ad612ccc1f5.tar.gz | |
Remove 'log' from navlink, update cu response, change to lup.
Diffstat (limited to 'cgi-bin/_site')
| -rw-r--r-- | cgi-bin/_site/feed.xml | 1 | ||||
| -rw-r--r-- | cgi-bin/_site/find.cgi | 247 | ||||
| -rw-r--r-- | cgi-bin/_site/indexer.pl | 116 | ||||
| -rw-r--r-- | cgi-bin/_site/robots.txt | 1 | ||||
| -rw-r--r-- | cgi-bin/_site/sitemap.xml | 3 |
5 files changed, 368 insertions, 0 deletions
diff --git a/cgi-bin/_site/feed.xml b/cgi-bin/_site/feed.xml new file mode 100644 index 0000000..66f8d32 --- /dev/null +++ b/cgi-bin/_site/feed.xml @@ -0,0 +1 @@ +<?xml version="1.0" encoding="utf-8"?><feed xmlns="http://www.w3.org/2005/Atom" ><generator uri="https://jekyllrb.com/" version="4.4.1">Jekyll</generator><link href="http://localhost:4000/feed.xml" rel="self" type="application/atom+xml" /><link href="http://localhost:4000/" rel="alternate" type="text/html" /><updated>2026-01-03T14:43:24+08:00</updated><id>http://localhost:4000/feed.xml</id></feed>
#!/usr/bin/perl

# find.cgi -- suffix-array full-text search CGI for the static site.
#
# Looks up the query (GET parameter "q") in a pre-built binary suffix
# array (sa.bin) over a lowercased byte corpus (corpus.bin), maps hits
# back to pages via file_map.dat, and renders an HTML results page.
# Concurrency is throttled with per-process lock files in /tmp.

use strict;
use warnings;
use Storable qw(retrieve);
use Encode qw(decode_utf8 encode_utf8);
use URI::Escape qw(uri_unescape);
use HTML::Escape qw(escape_html);

# Configuration
my $max_parallel = 50;                   # Max parallel search requests
my $lock_timeout = 30;                   # Seconds before dropping stale locks
my $max_results  = 20;                   # Max search results to display
my $sa_file      = 'sa.bin';             # Suffix Array index
my $cp_file      = 'corpus.bin';         # Raw text corpus
my $map_file     = 'file_map.dat';       # File metadata
my $lock_dir     = '/tmp/search_locks';  # Semaphore directory

# Concurrency control: count live lock files, reaping any stale ones.
mkdir $lock_dir, 0777 unless -d $lock_dir;
my $active_count = 0;
my $now = time();

opendir(my $dh, $lock_dir) or die "opendir $lock_dir: $!";
while (my $file = readdir($dh)) {
    next unless $file =~ /\.lock$/;
    my $path  = "$lock_dir/$file";
    my $mtime = (stat($path))[9] || 0;
    ($now - $mtime > $lock_timeout) ? unlink($path) : $active_count++;
}
closedir($dh);

# Template variables
my $year = (localtime)[5] + 1900;
my $search_text = '';

# Busy check: refuse new searches once the semaphore count is saturated.
if ($active_count >= $max_parallel) {
    print "Content-Type: text/html\n\n";
    render_html("<p>Server busy. Please try again in a few seconds.</p>", "", $year);
    exit;
}

# Create semaphore lock for this request (PID-named, reaped on exit or
# by the stale-lock sweep above). A failure here is logged but not
# fatal: final_output() only unlinks the lock if the handle exists.
my $lock_file = "$lock_dir/$$.lock";
open(my $fh_lock, '>', $lock_file)
    or warn "Cannot create lock $lock_file: $!";

# Query decoding: q=... from QUERY_STRING, form-urlencoded, then
# sanitized (printable chars only, max 64 chars, trimmed).
if (($ENV{QUERY_STRING} || '') =~ /^q=([^&]*)/) {
    my $raw_q = $1;
    $raw_q =~ tr/+/ /;
    $search_text = uri_unescape($raw_q);
    $search_text = decode_utf8($search_text // "");
    $search_text =~ s/\P{Print}//g;
    $search_text = substr($search_text, 0, 64);
    $search_text =~ s/^\s+|\s+$//g;
}

my $safe_search_text = escape_html($search_text);

print "Content-Type: text/html\n\n";

if ($search_text eq '') {
    final_output("<p>Please enter a search term above.</p>");
}

# Binary search over the suffix array. The corpus was indexed as
# lowercased UTF-8 bytes, so the query must be encoded the same way
# before byte-wise comparison.
my @results;
my $query = encode_utf8(lc($search_text));
my $query_len = length($query);

if (-f $sa_file && -f $cp_file) {
    open(my $fh_sa, '<', $sa_file) or die $!;
    open(my $fh_cp, '<', $cp_file) or die $!;
    binmode($fh_sa);
    binmode($fh_cp);

    my $file_map = retrieve($map_file);
    my $total_suffixes = (-s $sa_file) / 4;    # entries are packed "L" (4 bytes)

    # Find left boundary: first suffix with prefix >= query.
    my ($low, $high) = (0, $total_suffixes - 1);
    my $first_hit = -1;

    while ($low <= $high) {
        my $mid = int(($low + $high) / 2);
        seek($fh_sa, $mid * 4, 0);
        read($fh_sa, my $bin_off, 4);
        my $off = unpack("L", $bin_off);
        seek($fh_cp, $off, 0);
        read($fh_cp, my $text, $query_len);

        my $cmp = $text cmp $query;
        if ($cmp >= 0) {
            $first_hit = $mid if $cmp == 0;
            $high = $mid - 1;
        } else {
            $low = $mid + 1;
        }
    }

    # Collect results if found
    if ($first_hit != -1) {
        my $last_hit = $first_hit;
        ($low, $high) = ($first_hit, $total_suffixes - 1);

        # Find right boundary: last suffix whose prefix equals the query.
        while ($low <= $high) {
            my $mid = int(($low + $high) / 2);
            seek($fh_sa, $mid * 4, 0);
            read($fh_sa, my $bin_off, 4);
            my $off = unpack("L", $bin_off);
            seek($fh_cp, $off, 0);
            read($fh_cp, my $text, $query_len);

            if (($text cmp $query) <= 0) {
                $last_hit = $mid if $text eq $query;
                $low = $mid + 1;
            } else {
                $high = $mid - 1;
            }
        }

        # Walk the hit range, de-duplicating by source page and building
        # a ~50-char snippet around each first hit.
        my %seen;
        for my $i ($first_hit .. $last_hit) {
            seek($fh_sa, $i * 4, 0);
            read($fh_sa, my $bin_off, 4);
            my $offset = unpack("L", $bin_off);

            foreach my $m (@$file_map) {
                if ($offset >= $m->{start} && $offset < $m->{end}) {
                    if (!$seen{$m->{path}}++) {
                        # Capture more than 50 chars for trimming
                        my $snip_start = ($offset - 30 < $m->{start}) ? $m->{start} : $offset - 30;
                        my $max_len  = $m->{end} - $snip_start;
                        my $read_len = ($max_len > 120) ? 120 : $max_len;
                        seek($fh_cp, $snip_start, 0);
                        read($fh_cp, my $raw_snip, $read_len);

                        my $snippet = decode_utf8($raw_snip, Encode::FB_QUIET) // $raw_snip;
                        $snippet =~ s/\s+/ /g;    # Normalize whitespace

                        # Trim start: Partial word removal
                        if ($snip_start > $m->{start}) {
                            $snippet =~ s/^[^\s]*\s//;
                        }

                        # Trim end: Length limit and partial word removal
                        my $has_more = 0;
                        if (length($snippet) > 50) {
                            $snippet = substr($snippet, 0, 50);
                            $has_more = 1 if $snippet =~ s/\s+[^\s]*$//;
                        }
                        elsif ($snip_start + $read_len < $m->{end}) {
                            # This check handles snippets that are naturally short but
                            # there's still more text in the article we didn't read
                            $has_more = 1;
                        }

                        # Cleanup & capitalize
                        $snippet = ucfirst($snippet);
                        $snippet = escape_html($snippet) . ($has_more ? "..." : "");

                        my $clean_path = $m->{path};
                        $clean_path =~ s|^\.\./_site/||;

                        push @results, {
                            path    => $clean_path,
                            title   => $m->{title},    # fixed: stray double comma removed
                            snippet => $snippet
                        };
                    }
                    last;
                }
            }
            last if scalar @results >= $max_results;
        }
    }
    close($fh_sa);
    close($fh_cp);
}

# --- Formatting & Output ---
my $list_html = "";
if (@results == 0) {
    $list_html = "<p>No results found for \"<b>$safe_search_text</b>\".</p>";
} else {
    $list_html = "<ul>" . join('', map {
        "<li><a href=\"/$_->{path}\">$_->{title}</a><br><small>$_->{snippet}</small></li>"
    } @results) . "</ul>";
}

final_output($list_html);

# --- Helpers ---

# final_output($content)
# Renders the page, releases this request's semaphore lock (if it was
# ever acquired), and terminates the CGI. Never returns.
sub final_output {
    my ($content) = @_;
    render_html($content, $safe_search_text, $year);
    if ($fh_lock) { close($fh_lock); unlink($lock_file); }
    exit;
}

# render_html($content, $q_val, $yr)
# Prints the full HTML shell around $content. $q_val must already be
# HTML-escaped (it is interpolated into the search box value); $yr is
# the footer copyright year.
sub render_html {
    my ($content, $q_val, $yr) = @_;
    print <<"HTML";
<!DOCTYPE html>
<html lang="en-us">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <title>Search</title>
  <link rel="stylesheet" href="/assets/css/main.css">
  <link rel="stylesheet" href="/assets/css/skeleton.css">
</head>
<body>
  <div id="nav-container" class="container">
    <ul id="navlist" class="left">
      <li><a href="/" class="link-decor-none">hme</a></li>
      <li><a href="/log/" class="link-decor-none">log</a></li>
      <li><a href="/projects/" class="link-decor-none">poc</a></li>
      <li><a href="/about/" class="link-decor-none">abt</a></li>
      <li class="active"><a href="/cgi-bin/find.cgi" class="link-decor-none">sws</a></li>
      <li><a href="/feed.xml" class="link-decor-none">rss</a></li>
    </ul>
  </div>
  <main class="container" id="main">
    <div class="container">
      <h2>Search</h2>
      <form action="" method="GET">
        <input id="search-box" type="text" name="q" value="$q_val">
        <input id="search-btn" type="submit" value="Search">
      </form>
      $content
    </div>
  </main>
  <div class="footer">
    <div class="container">
      <div class="twelve columns right container-2">
        <p id="footer-text">© ASCIIMX - $yr</p>
      </div>
    </div>
  </div>
</body>
</html>
HTML
}
#!/usr/bin/perl

# indexer.pl -- build the binary search index consumed by find.cgi.
#
# Walks the generated site under ../_site/log, extracts the visible
# text of each post's index.html, concatenates everything into a single
# NUL-separated lowercased byte corpus, sorts all suffix offsets into a
# suffix array, and writes corpus.bin / sa.bin / file_map.dat into the
# CGI directory.

use strict;
use warnings;
use File::Find;
use Storable qw(store);
use Encode qw(encode_utf8);
use HTML::Entities qw(decode_entities);
use Time::HiRes qw(gettimeofday tv_interval);

my $dir     = '../_site/log';
my $cgi_dir = '../_site/cgi-bin/';
my $corpus_file = "${cgi_dir}corpus.bin";
my $sa_file     = "${cgi_dir}sa.bin";
my $map_file    = "${cgi_dir}file_map.dat";

my %excluded_files = (
    'index.html' => 1,    # /log/index.html (the listing page itself)
);

# Start timing
my $t0 = [gettimeofday];

my $corpus = "";
my @file_map;

print "Building corpus...\n";

find({
    wanted => sub {
        # Only index index.html files
        return unless -f $_ && $_ eq 'index.html';

        my $rel_path = $File::Find::name;
        $rel_path =~ s|^\Q$dir\E/?||;
        return if $excluded_files{$rel_path};

        if (open my $fh, '<:encoding(UTF-8)', $_) {
            my $content = do { local $/; <$fh> };
            close $fh;

            my ($title) = $content =~ m|<title>(.*?)</title>|is;
            $title //= (split('/', $File::Find::name))[-2];    # Fallback to folder name
            $title =~ s/^\s+|\s+$//g;

            # Extract content from <main> or use whole file
            my ($text) = $content =~ m|<main>(.*?)</main>|is;
            $text //= $content;

            # Strip tags and normalize whitespace. <pre>/<code> bodies
            # are dropped entirely so code listings are not indexed.
            $text =~ s|<pre[^>]*>.*?</pre>| |gs;
            $text =~ s|<code[^>]*>.*?</code>| |gs;
            $text =~ s|<[^>]+>| |g;
            $text = decode_entities($text);
            $text =~ s|\s+| |g;
            $text =~ s/^\s+|\s+$//g;

            # CRITICAL: Convert to lowercase and then to raw bytes.
            # This ensures length() and substr() work on byte offsets
            # for seek(). The trailing NUL terminates each document so
            # suffixes never compare across document boundaries.
            my $raw_entry = encode_utf8(lc($text) . "\0");

            my $start = length($corpus);
            $corpus .= $raw_entry;

            push @file_map, {
                start => $start,
                end   => length($corpus),
                title => $title,
                path  => $File::Find::name
            };
        }
    },
    no_chdir => 0,
}, $dir);

print "Sorting suffixes...\n";

# Initialize the array of suffix start offsets (one per corpus byte).
my @sa = 0 .. (length($corpus) - 1);

# Use a block that forces byte-level comparison
{
    use bytes;
    @sa = sort {
        # First 64 bytes check (fast path)
        (substr($corpus, $a, 64) cmp substr($corpus, $b, 64)) ||
        # Full string fallback (required for correctness)
        (substr($corpus, $a) cmp substr($corpus, $b))
    } @sa;
}

print "Writing index files to disk...\n";

# Write handles are explicitly close-checked: buffered write errors
# (e.g. disk full) only surface at close, and a truncated index would
# silently corrupt every search.
open my $cfh, '>', $corpus_file or die "Cannot write $corpus_file: $!";
binmode($cfh);    # Raw byte mode
print $cfh $corpus;
close $cfh or die "Cannot close $corpus_file: $!";

open my $sfh, '>', $sa_file or die "Cannot write $sa_file: $!";
binmode($sfh);
# Pack as 32-bit unsigned integers (standard 'L')
print $sfh pack("L*", @sa);
close $sfh or die "Cannot close $sa_file: $!";

store(\@file_map, $map_file) or die "Cannot write $map_file";

my $elapsed = tv_interval($t0);
my $c_size  = -s $corpus_file;
my $s_size  = -s $sa_file;

printf "\nIndexing Complete!\n";
printf "Total Time: %.4f seconds\n", $elapsed;
printf "Corpus Size: %.2f KB\n", $c_size / 1024;
printf "Suffix Array: %.2f KB\n", $s_size / 1024;
printf "Files Processed: %d\n", scalar(@file_map);
