| author | Sadeep Madurange <sadeep@asciimx.com> | 2026-01-01 18:33:54 +0800 |
|---|---|---|
| committer | Sadeep Madurange <sadeep@asciimx.com> | 2026-01-01 18:33:54 +0800 |
| commit | 7375a6b8c6ac05f79755e27afeb3062d027c37f2 (patch) | |
| tree | 93d204976ecf0d9a7bf02064ee2116af272624b9 | |
| parent | 5d84833e25ed23e40ac5703527469101448b9b66 (diff) | |
| download | www-7375a6b8c6ac05f79755e27afeb3062d027c37f2.tar.gz | |
Optimize search and add guards.
| -rw-r--r-- | README.txt | 11 |
| -rw-r--r-- | cgi-bin/find.cgi | 150 |
| -rw-r--r-- | cgi-bin/indexer.pl | 29 |
3 files changed, 111 insertions, 79 deletions
@@ -12,12 +12,13 @@ $ cd /var/www/htdocs
 # find . -type d -exec chmod 554 {} +
 # chmod 554 cgi-bin/find.cgi
 
-Checking CGI script errors in chroot:
+Search lock setup in chroot
 
-# chroot /var/www/ htdocs/www.asciimx.com/cgi-bin/find.cgi
+# mkdir -p /var/www/tmp/search_locks
+# chown www:www /var/www/tmp/search_locks
+# chmod 700 /var/www/tmp/search_locks
 
-Search Perl deps:
+Checking CGI script errors in chroot:
 
-# doas pkg_add p5-File-Slurper
-# doas pkg_add p5-HTML-TreeBuilder-XPath
+# chroot /var/www/ htdocs/www.asciimx.com/cgi-bin/find.cgi
 
diff --git a/cgi-bin/find.cgi b/cgi-bin/find.cgi
index fc1d4af..3e5c8a2 100644
--- a/cgi-bin/find.cgi
+++ b/cgi-bin/find.cgi
@@ -1,86 +1,124 @@
 #!/usr/bin/perl
+use strict;
+use warnings;
+use Storable qw(retrieve);
 use Encode qw(decode_utf8);
 use HTML::Escape qw(escape_html);
 
-my $search_text = '';
+# Configuration
+my $max_parallel = 50;                  # max no. of parallel searches
+my $lock_timeout = 30;                  # drop stale locks after this many seconds
+my $max_results = 20;                   # max search results
+my $min_query_len = 3;                  # min query length to avoid matching 'a', 'e'
+my $index_file = 'search_index.dat';    # index file
+my $lock_dir = '/tmp/search_locks';     # lock file directory
+
+# Concurrency control
+mkdir $lock_dir, 0777 unless -d $lock_dir;
+my $active_count = 0;
+my $now = time();
+
+opendir(my $dh, $lock_dir);
+while (my $file = readdir($dh)) {
+    next unless $file =~ /\.lock$/;
+    my $path = "$lock_dir/$file";
+    my $mtime = (stat($path))[9] || 0;
+    ( $now - $mtime > $lock_timeout ) ? unlink($path) : $active_count++;
+}
+closedir($dh);
+
+# Too many search requests
+if ($active_count >= $max_parallel) {
+    print "Content-Type: text/html\n\n";
+    render_html("<p>Server busy. Please try again in a few seconds.</p>", "", (localtime)[5]+1900);
+    exit;
+}
 
-if ($ENV{QUERY_STRING} =~ /^q=([^&]*)/) {
+my $lock_file = "$lock_dir/$$.lock";
+open(my $fh_lock, '>', $lock_file);
+
+# Decode search text as utf-8, toss non-printable chars, trim
+my $search_text = '';
+if (($ENV{QUERY_STRING} || '') =~ /^q=([^&]*)/) {
     $search_text = decode_utf8($1 // "");
-    $search_text =~ s/\P{Print}//g; # toss any non-printable utf-8 characters
+    $search_text =~ s/\P{Print}//g;
     $search_text = substr($search_text, 0, 64);
     $search_text =~ s/^\s+|\s+$//g;
 }
 
-my @results;
+# Pre-prepare common template variables
+my $safe_search_text = escape_html($search_text);
+my $year = (localtime)[5] + 1900;
 
-# Search only index.html files inside the first level of subdirectories
-my $start_dir = '../log';
-my @files = glob("$start_dir/*/index.html");
+print "Content-Type: text/html\n\n";
 
-foreach my $path (@files) {
-    # Skip if the path is a symlink or not a file
-    next if -l $path || ! -f $path;
+# Input validation
+if ($search_text eq '') {
+    final_output("<p>Please enter a search term above.</p>");
+}
 
-    next unless open(my $fh, '<:utf8', $path);
-    my $html = do { local $/; <$fh> };
-    close($fh);
-
-    my ($text) = $html =~ m|<main>(.*?)</main>|is;
-    $text =~ s|<[^>]+>| |g;
-    $text =~ s|\s+| |g;
+if (length($search_text) < $min_query_len) {
+    final_output("<p>Search term is too short. Please enter at least $min_query_len characters.</p>");
+}
+
+if (!-f $index_file) {
+    final_output("<p>Search temporarily unavailable.</p>");
+}
 
-    next unless $text =~ /(.{0,40})(\Q$search_text\E)(.{0,40})/is;
+my $index = retrieve($index_file);
+my @results;
+my $found = 0;
+
+foreach my $url (sort keys %$index) {
+    last if $found >= $max_results;
+    my $data = $index->{$url};
+
+    # Grab 80 char snippet to chop at a word boundary later
+    next unless $data->{c} =~ /(.{0,40})(\Q$search_text\E)(.{0,40})/is;
     my ($before, $actual, $after) = ($1, $2, $3);
+    $found++;
 
-    # Trim if we cut into the middle of a sentence
+    # Chop at 25 or word boundary
     $after =~ s/\s\S*$// if length($after) > 25;
     $before =~ s/^.*?\s// if length($before) > 25;
 
-    if ($before =~ /\S/) { # If before has non-whitespace characters
-        $before = ucfirst($before);
-    } else {
-        $before = "";  # Clear any stray spaces
-        $actual = ucfirst($actual);
-    }
-
-    my $safe_before = escape_html($before);
-    my $safe_actual = escape_html($actual);
-    my $safe_after = escape_html($after);
-    my $snippet = "${safe_before}<b>${safe_actual}</b>${safe_after}...";
+    $before = ($before =~ /\S/) ? ucfirst($before) : "";
+    $actual = ($before eq "") ? ucfirst($actual) : $actual;
 
-    my ($title) = $html =~ m|<title>(.*?)</title>|is;
-    my $safe_title = escape_html($title);
-
-    $path =~ s|^\.\./||;
+    my $snippet = escape_html($before) . "<b>" . escape_html($actual) . "</b>" . escape_html($after) . "...";
 
     push @results, {
-        path => $path,
-        title => $safe_title,
+        path => $url,
+        title => escape_html($data->{t}),
         snippet => $snippet
     };
 }
 
-print "Content-Type: text/html\n\n";
-
-my $list;
-if ($search_text eq '') {
-    $list = "<p>Please enter a search term above.</p>";
-} elsif (@results == 0) {
-    $list = "<p>No results found for \"<b>$search_text</b>\".</p>";
+# Format results list
+my $list_html = "";
+if (@results == 0) {
+    $list_html = "<p>No results found for \"<b>$safe_search_text</b>\".</p>";
 } else {
-    $list = "<ul>";
-    foreach my $res (@results) {
-        my $url = $res->{path};
-        $list .= "<li><a href=\"/$url\">$res->{title}</a><br><small>$res->{snippet}</small></li>";
-    }
-    $list .= "</ul>";
+    $list_html = "<ul>" . join('', map {
+        "<li><a href=\"/$_->{path}\">$_->{title}</a><br><small>$_->{snippet}</small></li>"
    } @results) . "</ul>";
 }
 
-my $safe_search_text = escape_html($search_text);
-my $year = (localtime)[5] + 1900;
+final_output($list_html);
 
-print <<"HTML";
+# Helper to ensure layout is always preserved
+sub final_output {
+    my ($content) = @_;
+    render_html($content, $safe_search_text, $year);
+    close($fh_lock) if $fh_lock;
+    unlink($lock_file) if -f $lock_file;
+    exit;
+}
+
+sub render_html {
+    my ($content, $q_val, $yr) = @_;
+    print <<"HTML";
 <!DOCTYPE html>
 <html lang="en-us">
 <head>
@@ -105,19 +143,21 @@ print <<"HTML";
     <div class="container">
       <h2>Search</h2>
       <form action="" method="GET">
-        <input id="search-box" type="text" name="q" value="$safe_search_text">
+        <input id="search-box" type="text" name="q" value="$q_val">
         <input id="search-btn" type="submit" value="Search">
       </form>
-      $list
+      $content
     </div>
   </main>
   <div class="footer">
    <div class="container">
     <div class="twelve columns right container-2">
-     <p id="footer-text">© ASCIIMX - $year</p>
+     <p id="footer-text">© ASCIIMX - $yr</p>
     </div>
   </div>
 </div>
 </body>
 </html>
 HTML
+}
+
diff --git a/cgi-bin/indexer.pl b/cgi-bin/indexer.pl
index d0314a1..38a918e 100644
--- a/cgi-bin/indexer.pl
+++ b/cgi-bin/indexer.pl
@@ -5,13 +5,13 @@ use warnings;
 use Storable qw(nstore);
 use HTML::Entities qw(decode_entities);
 
-my $built_site_dir = '../_site/log';
-my $output_file = 'search_index.dat';
+# --- Configuration ---
+my $built_site_dir = '../_site/log';
+my $output_file = '../_site/cgi-bin/search_index.dat';
 my %index;
 
 print "Building search index from $built_site_dir...\n";
 
-# glob finds every index.html in subdirectories of /log/
 foreach my $path (glob("$built_site_dir/*/index.html")) {
     next unless open(my $fh, '<:utf8', $path);
     my $html = do { local $/; <$fh> };
@@ -22,23 +22,19 @@ foreach my $path (glob("$built_site_dir/*/index.html")) {
     my ($main) = $html =~ m|<main>(.*?)</main>|is;
     $main //= '';
 
-    # Remove code and pre blocks to keep index prose-only
+    # Strip HTML and clean prose
     $main =~ s|<pre[^>]*>.*?</pre>| |gs;
     $main =~ s|<code[^>]*>.*?</code>| |gs;
-
-    # Strip all remaining HTML tags
     $main =~ s|<[^>]+>| |g;
-
-    # Decode entities (e.g., &amp; -> &) for accurate searching
     $main = decode_entities($main);
-
-    # Normalize whitespace (squash multiple spaces/newlines)
     $main =~ s|\s+| |g;
     $main =~ s/^\s+|\s+$//g;
 
-    # Map file path to the final web URL
-    # Example: ../_site/log/arduino/index.html -> /log/arduino/index.html
-    (my $url = $path) =~ s|^\.\./_site/|/|;
+    # Normalize path
+    my $url = $path;
+    $url =~ s|^\.\./_site/||;   # Remove local build directory
+    $url =~ s|^\.\./||;         # Remove any leading dots
+    $url =~ s|^/+||;            # Remove leading slashes
 
     $index{$url} = {
         t => $title || "Untitled",
@@ -46,10 +42,5 @@ foreach my $path (glob("$built_site_dir/*/index.html")) {
     };
 }
 
-# Save using network-order binary (nstore) for portability
 nstore(\%index, $output_file);
-
-my $count = scalar(keys %index);
-my $size = -s $output_file;
-printf("Index complete: %d files (%.2f KB)\n", $count, $size / 1024);
-
+printf("Index complete: %d files (%.2f KB)\n", scalar(keys %index), (-s $output_file) / 1024);

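A quick sanity check of the index that indexer.pl writes (not part of the commit): read it back with Storable from the build tree. The one-liner below assumes it is run from cgi-bin/, so the relative path matches the new $output_file, and it uses the t (title) key stored above.

$ perl -MStorable=retrieve -E 'my $i = retrieve("../_site/cgi-bin/search_index.dat"); say scalar(keys %$i), " pages indexed"; say "$_ => $i->{$_}{t}" for sort keys %$i'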