path: root/cgi-bin
author     Sadeep Madurange <sadeep@asciimx.com>    2026-01-04 17:57:39 +0800
committer  Sadeep Madurange <sadeep@asciimx.com>    2026-01-04 17:57:39 +0800
commit     1a4a6cb6d2aa2c8512e9637dc5dd95997321c444 (patch)
tree       7b6b9e514b48d64dd811b75c680c1268b532aec6 /cgi-bin
parent     16fe66dd83cbffa18af31676a380660ebce4e827 (diff)
download   www-1a4a6cb6d2aa2c8512e9637dc5dd95997321c444.tar.gz
Fix the search engine post.
Diffstat (limited to 'cgi-bin')
-rw-r--r--  cgi-bin/_site/feed.xml       1
-rw-r--r--  cgi-bin/_site/find.cgi     247
-rw-r--r--  cgi-bin/_site/indexer.pl   116
-rw-r--r--  cgi-bin/_site/robots.txt     1
-rw-r--r--  cgi-bin/_site/sitemap.xml    3
5 files changed, 0 insertions, 368 deletions
diff --git a/cgi-bin/_site/feed.xml b/cgi-bin/_site/feed.xml
deleted file mode 100644
index 66f8d32..0000000
--- a/cgi-bin/_site/feed.xml
+++ /dev/null
@@ -1 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?><feed xmlns="http://www.w3.org/2005/Atom" ><generator uri="https://jekyllrb.com/" version="4.4.1">Jekyll</generator><link href="http://localhost:4000/feed.xml" rel="self" type="application/atom+xml" /><link href="http://localhost:4000/" rel="alternate" type="text/html" /><updated>2026-01-03T14:43:24+08:00</updated><id>http://localhost:4000/feed.xml</id></feed>
\ No newline at end of file
diff --git a/cgi-bin/_site/find.cgi b/cgi-bin/_site/find.cgi
deleted file mode 100644
index ab066dd..0000000
--- a/cgi-bin/_site/find.cgi
+++ /dev/null
@@ -1,247 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use warnings;
-use Storable qw(retrieve);
-use Encode qw(decode_utf8 encode_utf8);
-use URI::Escape qw(uri_unescape);
-use HTML::Escape qw(escape_html);
-
-# Configuration
-my $max_parallel = 50; # Max parallel search requests
-my $lock_timeout = 30; # Seconds before dropping stale locks
-my $max_results = 20; # Max search results to display
-my $sa_file = 'sa.bin'; # Suffix Array index
-my $cp_file = 'corpus.bin'; # Raw text corpus
-my $map_file = 'file_map.dat'; # File metadata
-my $lock_dir = '/tmp/search_locks'; # Semaphore directory
-
-# Concurrency control
-mkdir $lock_dir, 0777 unless -d $lock_dir;
-my $active_count = 0;
-my $now = time();
-
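-# Count fresh *.lock files (one per in-flight request); anything older than
-# $lock_timeout is assumed to belong to a dead request and is removed.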
-opendir(my $dh, $lock_dir);
-while (my $file = readdir($dh)) {
-    next unless $file =~ /\.lock$/;
-    my $path = "$lock_dir/$file";
-    my $mtime = (stat($path))[9] || 0;
-    ($now - $mtime > $lock_timeout) ? unlink($path) : $active_count++;
-}
-closedir($dh);
-
-# Template variables
-my $year = (localtime)[5] + 1900;
-my $search_text = '';
-
-# Busy check
-if ($active_count >= $max_parallel) {
-    print "Content-Type: text/html\n\n";
-    render_html("<p>Server busy. Please try again in a few seconds.</p>", "", $year);
-    exit;
-}
-
-# Create semaphore lock
-my $lock_file = "$lock_dir/$$.lock";
-open(my $fh_lock, '>', $lock_file);
-
-# Query decoding
-if (($ENV{QUERY_STRING} || '') =~ /^q=([^&]*)/) {
-    my $raw_q = $1;
-    $raw_q =~ tr/+/ /;
-    $search_text = uri_unescape($raw_q);
-    $search_text = decode_utf8($search_text // "");
-    $search_text =~ s/\P{Print}//g;
-    $search_text = substr($search_text, 0, 64);
-    $search_text =~ s/^\s+|\s+$//g;
-}
-
-my $safe_search_text = escape_html($search_text);
-
-print "Content-Type: text/html\n\n";
-
-if ($search_text eq '') {
-    final_output("<p>Please enter a search term above.</p>");
-}
-
-# Binary search
-my @results;
-my $query = encode_utf8(lc($search_text));
-my $query_len = length($query);
-
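-# sa.bin is a list of 4-byte offsets into corpus.bin, sorted by the suffix
-# that starts at each offset; two binary searches find the run of suffixes
-# that begin with the (lowercased, UTF-8 encoded) query.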
-if (-f $sa_file && -f $cp_file) {
-    open(my $fh_sa, '<', $sa_file) or die $!;
-    open(my $fh_cp, '<', $cp_file) or die $!;
-    binmode($fh_sa);
-    binmode($fh_cp);
-
-    my $file_map = retrieve($map_file);
-    my $total_suffixes = (-s $sa_file) / 4;
-
-    # Find left boundary
-    my ($low, $high) = (0, $total_suffixes - 1);
-    my $first_hit = -1;
-
-    while ($low <= $high) {
-        my $mid = int(($low + $high) / 2);
-        seek($fh_sa, $mid * 4, 0);
-        read($fh_sa, my $bin_off, 4);
-        my $off = unpack("L", $bin_off);
-        seek($fh_cp, $off, 0);
-        read($fh_cp, my $text, $query_len);
-
-        my $cmp = $text cmp $query;
-        if ($cmp >= 0) {
-            $first_hit = $mid if $cmp == 0;
-            $high = $mid - 1;
-        } else {
-            $low = $mid + 1;
-        }
-    }
-
-    # Collect results if found
-    if ($first_hit != -1) {
-        my $last_hit = $first_hit;
-        ($low, $high) = ($first_hit, $total_suffixes - 1);
-
-        # Find right boundary
-        while ($low <= $high) {
-            my $mid = int(($low + $high) / 2);
-            seek($fh_sa, $mid * 4, 0);
-            read($fh_sa, my $bin_off, 4);
-            my $off = unpack("L", $bin_off);
-            seek($fh_cp, $off, 0);
-            read($fh_cp, my $text, $query_len);
-
-            if (($text cmp $query) <= 0) {
-                $last_hit = $mid if $text eq $query;
-                $low = $mid + 1;
-            } else {
-                $high = $mid - 1;
-            }
-        }
-
-        my %seen;
-        for my $i ($first_hit .. $last_hit) {
-            seek($fh_sa, $i * 4, 0);
-            read($fh_sa, my $bin_off, 4);
-            my $offset = unpack("L", $bin_off);
-
-            foreach my $m (@$file_map) {
-                if ($offset >= $m->{start} && $offset < $m->{end}) {
-                    if (!$seen{$m->{path}}++) {
-                        # Capture more than 50 chars for trimming
-                        my $snip_start = ($offset - 30 < $m->{start}) ? $m->{start} : $offset - 30;
-                        my $max_len = $m->{end} - $snip_start;
-                        my $read_len = ($max_len > 120) ? 120 : $max_len;
-                        seek($fh_cp, $snip_start, 0);
-                        read($fh_cp, my $raw_snip, $read_len);
-
-                        my $snippet = decode_utf8($raw_snip, Encode::FB_QUIET) // $raw_snip;
-                        $snippet =~ s/\s+/ /g; # Normalize whitespace
-
-                        # Trim start: Partial word removal
-                        if ($snip_start > $m->{start}) {
-                            $snippet =~ s/^[^\s]*\s//;
-                        }
-
-                        # Trim end: Length limit and partial word removal
-                        my $has_more = 0;
-                        if (length($snippet) > 50) {
-                            $snippet = substr($snippet, 0, 50);
-                            $has_more = 1 if $snippet =~ s/\s+[^\s]*$//;
-                        }
-                        elsif ($snip_start + $read_len < $m->{end}) {
-                            # This check handles snippets that are naturally short but
-                            # there's still more text in the article we didn't read
-                            $has_more = 1;
-                        }
-
-                        # Cleanup & capitalize
-                        $snippet = ucfirst($snippet);
-                        $snippet = escape_html($snippet) . ($has_more ? "..." : "");
-
-                        my $clean_path = $m->{path};
-                        $clean_path =~ s|^\.\./_site/||;
-
-                        push @results, {
-                            path => $clean_path,
-                            title => $m->{title},
-                            snippet => $snippet
-                        };
-                    }
-                    last;
-                }
-            }
-            last if scalar @results >= $max_results;
-        }
-    }
-    close($fh_sa);
-    close($fh_cp);
-}
-
-# --- Formatting & Output ---
-my $list_html = "";
-if (@results == 0) {
-    $list_html = "<p>No results found for \"<b>$safe_search_text</b>\".</p>";
-} else {
-    $list_html = "<ul>" . join('', map {
-        "<li><a href=\"/$_->{path}\">$_->{title}</a><br><small>$_->{snippet}</small></li>"
-    } @results) . "</ul>";
-}
-
-final_output($list_html);
-
-# --- Helpers ---
-sub final_output {
-    my ($content) = @_;
-    render_html($content, $safe_search_text, $year);
-    if ($fh_lock) { close($fh_lock); unlink($lock_file); }
-    exit;
-}
-
-sub render_html {
-    my ($content, $q_val, $yr) = @_;
-    print <<"HTML";
-<!DOCTYPE html>
-<html lang="en-us">
-<head>
-  <meta charset="utf-8">
-  <meta name="viewport" content="width=device-width, initial-scale=1">
-  <title>Search</title>
-  <link rel="stylesheet" href="/assets/css/main.css">
-  <link rel="stylesheet" href="/assets/css/skeleton.css">
-</head>
-<body>
-  <div id="nav-container" class="container">
-    <ul id="navlist" class="left">
-      <li><a href="/" class="link-decor-none">hme</a></li>
-      <li><a href="/log/" class="link-decor-none">log</a></li>
-      <li><a href="/projects/" class="link-decor-none">poc</a></li>
-      <li><a href="/about/" class="link-decor-none">abt</a></li>
-      <li class="active"><a href="/cgi-bin/find.cgi" class="link-decor-none">sws</a></li>
-      <li><a href="/feed.xml" class="link-decor-none">rss</a></li>
-    </ul>
-  </div>
-  <main class="container" id="main">
-    <div class="container">
-      <h2>Search</h2>
-      <form action="" method="GET">
-        <input id="search-box" type="text" name="q" value="$q_val">
-        <input id="search-btn" type="submit" value="Search">
-      </form>
-      $content
-    </div>
-  </main>
-  <div class="footer">
-    <div class="container">
-      <div class="twelve columns right container-2">
-        <p id="footer-text">&copy; ASCIIMX - $yr</p>
-      </div>
-    </div>
-  </div>
-</body>
-</html>
-HTML
-}
-
diff --git a/cgi-bin/_site/indexer.pl b/cgi-bin/_site/indexer.pl
deleted file mode 100644
index 69f6838..0000000
--- a/cgi-bin/_site/indexer.pl
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use warnings;
-use File::Find;
-use Storable qw(store);
-use Encode qw(encode_utf8);
-use HTML::Entities qw(decode_entities);
-use Time::HiRes qw(gettimeofday tv_interval);
-
-my $dir = '../_site/log';
-my $cgi_dir = '../_site/cgi-bin/';
-my $corpus_file = "${cgi_dir}corpus.bin";
-my $sa_file = "${cgi_dir}sa.bin";
-my $map_file = "${cgi_dir}file_map.dat";
-
-my %excluded_files = (
-    'index.html' => 1, # /log/index.html
-);
-
-# Start timing
-my $t0 = [gettimeofday];
-
-my $corpus = "";
-my @file_map;
-
-print "Building corpus...\n";
-
-find({
-    wanted => sub {
-        # Only index index.html files
-        return unless -f $_ && $_ eq 'index.html';
-
-        my $rel_path = $File::Find::name;
-        $rel_path =~ s|^\Q$dir\E/?||;
-        return if $excluded_files{$rel_path};
-
-        if (open my $fh, '<:encoding(UTF-8)', $_) {
-            my $content = do { local $/; <$fh> };
-            close $fh;
-
-            my ($title) = $content =~ m|<title>(.*?)</title>|is;
-            $title //= (split('/', $File::Find::name))[-2]; # Fallback to folder name
-            $title =~ s/^\s+|\s+$//g;
-
-            # Extract content from <main> or use whole file
-            my ($text) = $content =~ m|<main>(.*?)</main>|is;
-            $text //= $content;
-
-            # Strip tags and normalize whitespace
-            $text =~ s|<pre[^>]*>.*?</pre>| |gs;
-            $text =~ s|<code[^>]*>.*?</code>| |gs;
-            $text =~ s|<[^>]+>| |g;
-            $text = decode_entities($text);
-            $text =~ s|\s+| |g;
-            $text =~ s/^\s+|\s+$//g;
-
-            # CRITICAL: Convert to lowercase and then to raw bytes
-            # This ensures length() and substr() work on byte offsets for seek()
-            my $raw_entry = encode_utf8(lc($text) . "\0");
-
-            my $start = length($corpus);
-            $corpus .= $raw_entry;
-
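-            # {start,end} is this document's byte range in corpus.bin;
-            # find.cgi uses it to map a matching suffix offset back to a page.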
-            push @file_map, {
-                start => $start,
-                end => length($corpus),
-                title => $title,
-                path => $File::Find::name
-            };
-        }
-    },
-    no_chdir => 0,
-}, $dir);
-
-print "Sorting suffixes...\n";
-
-# Initialize the array of indices
-my @sa = 0 .. (length($corpus) - 1);
-
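-# A plain comparison sort of every byte offset; comparisons can scan long
-# common prefixes, so this is slow in theory, but it is simple and fast
-# enough for a small blog corpus.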
-# Use a block that forces byte-level comparison
-{
-    use bytes;
-    @sa = sort {
-        # First 64 bytes check (fast path)
-        (substr($corpus, $a, 64) cmp substr($corpus, $b, 64)) ||
-        # Full string fallback (required for correctness)
-        (substr($corpus, $a) cmp substr($corpus, $b))
-    } @sa;
-}
-
-print "Writing index files to disk...\n";
-
-open my $cfh, '>', $corpus_file or die "Cannot write $corpus_file: $!";
-binmode($cfh); # Raw byte mode
-print $cfh $corpus;
-close $cfh;
-
-open my $sfh, '>', $sa_file or die "Cannot write $sa_file: $!";
-binmode($sfh);
-# Pack as 32-bit unsigned integers (standard 'L')
-print $sfh pack("L*", @sa);
-close $sfh;
-
-store \@file_map, $map_file;
-
-my $elapsed = tv_interval($t0);
-my $c_size = -s $corpus_file;
-my $s_size = -s $sa_file;
-
-printf "\nIndexing Complete!\n";
-printf "Total Time: %.4f seconds\n", $elapsed;
-printf "Corpus Size: %.2f KB\n", $c_size / 1024;
-printf "Suffix Array: %.2f KB\n", $s_size / 1024;
-printf "Files Processed: %d\n", scalar(@file_map);
-
diff --git a/cgi-bin/_site/robots.txt b/cgi-bin/_site/robots.txt
deleted file mode 100644
index d297064..0000000
--- a/cgi-bin/_site/robots.txt
+++ /dev/null
@@ -1 +0,0 @@
-Sitemap: http://localhost:4000/sitemap.xml
diff --git a/cgi-bin/_site/sitemap.xml b/cgi-bin/_site/sitemap.xml
deleted file mode 100644
index 9bf9de2..0000000
--- a/cgi-bin/_site/sitemap.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<urlset xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd" xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
-</urlset>