Diffstat (limited to 'man2html/scripts/cgi-bin')
-rwxr-xr-x  man2html/scripts/cgi-bin/man/man2html        109
-rwxr-xr-x  man2html/scripts/cgi-bin/man/mansearch       192
-rwxr-xr-x  man2html/scripts/cgi-bin/man/mansearchhelp    32
-rwxr-xr-x  man2html/scripts/cgi-bin/man/mansec          183
-rwxr-xr-x  man2html/scripts/cgi-bin/man/manwhatis       208
5 files changed, 724 insertions, 0 deletions
diff --git a/man2html/scripts/cgi-bin/man/man2html b/man2html/scripts/cgi-bin/man/man2html
new file mode 100755
index 0000000..7c515d1
--- /dev/null
+++ b/man2html/scripts/cgi-bin/man/man2html
@@ -0,0 +1,109 @@
+#!/bin/sh
+# man2html cgi script - uses /usr/bin/man2html to format man pages
+# auxiliary text files in /home/httpd/cgi-aux/man
+# aeb@cwi.nl - 980109
+
+MAN2HTML="/usr/bin/man2html"
+MANX="/home/httpd/cgi-aux/man/man.aux"
+
+# Do we need lynxcgi URLs? For the moment our criterion is
+# 1) HTTP_USER_AGENT=Lynx* and 2) HTTP_HOST is unset.
+AGENT="${HTTP_USER_AGENT-unknown}"
+
+case "$AGENT" in
+ Lynx*|lynx*)
+ HH="${HTTP_HOST-nohh}"
+ SED="s/%lynx //"
+ ;;
+ *)
+ HH=nolynx
+ SED="/%lynx/d"
+ ;;
+esac
+
+SERVER="${SERVER_NAME-localhost}"
+case "$HH" in
+ nohh)
+ LL="-l"
+ CG="lynxcgi:/home/httpd/cgi-bin/man"
+ ;;
+ *)
+ LL="-H$SERVER"
+ CG="http://$SERVER/cgi-bin/man"
+ ;;
+esac
+
+# Find the required page - expect to be called with "man2html [sec] page".
+# There may be a prefixed "-M manpath" option.
+if [ $# -ge 2 -a x"$1" = x-M ]; then
+ MANPATH="$2"
+ export MANPATH
+ shift; shift
+ MP=" using the given MANPATH"
+else
+ MP=""
+fi
+
+# If no arguments given, show a start page.
+if [ $# = 0 ]; then
+ if [ -r $MANX ]; then
+ cat $MANX | sed "s#%cg#$CG#g; $SED"
+ else
+ "$MAN2HTML" -E "man2html: cannot open $MANX"
+ fi
+ exit 0
+fi
+
+if [ $# -gt 2 ]; then
+ "$MAN2HTML" -E "man2html: bad invocation: too many arguments"
+ exit 0
+fi
+
+# A single argument may be an explicitly given path name.
+# Otherwise, ask man where to find it.
+if [ $# = 1 ]; then
+ case "$1" in
+ /*)
+ PAGE="$1"
+ ;;
+ *)
+ PAGE=`man -w -c "$@" 2>/dev/null`
+ ;;
+ esac
+else
+ PAGE=`man -w -c "$@" 2>/dev/null`
+fi
+
+if [ x"$PAGE" = x ]; then
+ complaint="man2html: cannot find a page"
+ if [ $# = 1 ]; then
+ "$MAN2HTML" -E "$complaint for $1$MP"
+ else
+ "$MAN2HTML" -E "$complaint for $2 in section $1$MP"
+ fi
+ exit 0
+fi
+
+if [ -r "$PAGE" ]
+then
+ case "$PAGE" in
+ *.gz)
+ zcat "$PAGE" | "$MAN2HTML" "$LL" -D "$PAGE"
+ ;;
+ *.bz2)
+ bzcat "$PAGE" | "$MAN2HTML" "$LL" -D "$PAGE"
+ ;;
+ *)
+ "$MAN2HTML" "$LL" "$PAGE"
+ ;;
+ esac
+elif [ -r "$PAGE".gz ]
+then
+ zcat "$PAGE".gz | "$MAN2HTML" "$LL" -D "$PAGE"
+elif [ -r "$PAGE".bz2 ]
+then
+ bzcat "$PAGE".bz2 | "$MAN2HTML" "$LL" -D "$PAGE"
+else
+ "$MAN2HTML" -E "Strange... Cannot find (or read) $PAGE."
+fi
+exit 0
diff --git a/man2html/scripts/cgi-bin/man/mansearch b/man2html/scripts/cgi-bin/man/mansearch
new file mode 100755
index 0000000..2644db6
--- /dev/null
+++ b/man2html/scripts/cgi-bin/man/mansearch
@@ -0,0 +1,192 @@
+#!/bin/sh
+#
+# Interface to a glimpse search of the man pages.
+# Michael Hamilton <michael@actrix.gen.nz>
+# Small changes - aeb, 980109
+#
+
+# Do we need lynxcgi URLs? For the moment our criterion is
+# 1) HTTP_USER_AGENT=Lynx* and 2) HTTP_HOST is unset.
+AGENT="${HTTP_USER_AGENT-unknown}"
+
+case "$AGENT" in
+ Lynx*|lynx*)
+ HH="${HTTP_HOST-nohh}"
+ SED="s/%lynx //"
+ ;;
+ *)
+ HH=nolynx
+ SED="/%lynx/d"
+ ;;
+esac
+
+SERVER="${SERVER_NAME-localhost}"
+case "$HH" in
+ nohh)
+ CG="lynxcgi:/home/httpd/cgi-bin/man"
+ ;;
+ *)
+ CG="http://$SERVER/cgi-bin/man"
+ ;;
+esac
+QUOTE="'"
+export CG QUOTE SED
+
+exec awk '
+function removeopts(string) {
+ gsub(/^[ \t]/, "", string); # Remove leading spaces
+ gsub(/[ \t]$/, "", string); # Remove trailing spaces
+ gsub(/[ \t];/, ";", string); # Remove spaces before ;
+ gsub(/[ \t],/, ",", string); # Remove spaces before ,
+ while (match(string, /^-[FLBwk1-8]/)) {
+ if (match(string, /^-[FL]( |.)[^ \t]+[ \t]+/)) { # Option with arg
+ options = options " " substr(string, RSTART, RLENGTH);
+ string = substr(string, RSTART + RLENGTH);
+ }
+ else if (match(string, /^-[Bwk1-8][ \t]+/)) { # Option without arg
+ options = options " " substr(string, RSTART, RLENGTH);
+ string = substr(string, RSTART + RLENGTH);
+ }
+ else if (match(string, /^-[^ \t]/)) { # Remove it
+ string = substr(string, RSTART + RLENGTH);
+ }
+ }
+ return string;
+}
+
+BEGIN {
+
+ searchdocument = "/home/httpd/cgi-aux/man/mansearch.aux";
+ quote = ENVIRON["QUOTE"];
+ cgipath = ENVIRON["CG"];
+ sedcmd = ENVIRON["SED"];
+ truncate_at = 11; # Single page display match limit.
+
+ glimpse_cmd = "glimpse -z -H /var/man2html -y -W -i "
+
+ for (i = 1; i < ARGC; i++) {
+ string = string " " ARGV[i];
+ }
+ # Have to be careful to single quote this
+ # string later.
+ gsub(/[^a-zA-Z0-9-_+ \t\/@%:;,$*|]/, " ", string);
+
+ string = removeopts(string);
+
+ gsub(/[^a-zA-Z0-9-_+ \t\/@%:,]/, " ", options);
+
+ if (!string) {
+ if (system("test -r " searchdocument ) != 0) {
+ print "Content-type: text/html\n\n";
+ print "<head>";
+ print "<title>mansearch - file not found</title>";
+ print "</head>\n<body>";
+ print "Sorry - cannot read " searchdocument ".";
+ print "</body>";
+ exit;
+ }
+ system("sed " quote "s#%cg#" cgipath "#g;" sedcmd quote " " searchdocument );
+ exit;
+ }
+
+ print "Content-type: text/html";
+ print "";
+ print "<HTML>";
+ print "<HEAD>";
+ print "<TITLE>Manual Pages - Search Results: " string "</TITLE>";
+ print "</HEAD>";
+ print "<BODY>";
+
+ print "<H1>Manual Pages - Search Results</H1>";
+ print "<H2>Target text: " options " " string "</H2>";
+
+ print "<A HREF=\"" cgipath "/mansearch\">";
+ print "Perform another search";
+ print "</A><BR>";
+ print "<A HREF=\"" cgipath "/man2html\">";
+ print "Return to Main Contents";
+ print "</A>";
+
+ print "<HR>";
+
+ print "<DL>";
+ # Unless you like being hacked, the single
+ # forward quotes are most important.
+ cmd = glimpse_cmd " " options " " quote string quote " 2>/dev/null" ;
+
+ while ((cmd | getline matchline) > 0) {
+ if (split(matchline, part, ": ") == 1) {
+ continue;
+ }
+ else {
+ fullname = part[1];
+ }
+
+ if (fullname == "glimpse") {
+ print "<DT><B>"fullname"</B>:";
+ }
+ else if (fullname != last_fullname) {
+ mcount++;
+ tcount = 0;
+ last_fullname = fullname ;
+ last_text = "";
+
+ if (match(fullname, ".*/")) {
+ dirname = substr(fullname, 1, RLENGTH);
+ filename = substr(fullname, RLENGTH + 1);
+ if (dirname != last_dirname) {
+ last_dirname = dirname;
+ print "</DL>";
+ print "<H3>Location: " dirname "</H3>";
+ print "<DL>";
+ }
+ }
+ else {
+ filename = fullname;
+ }
+
+ if (match(filename, /\.[^.]+$/)) {
+ ref = substr(filename, 1, RSTART - 1) "+" substr(filename, RSTART + 1);
+ }
+ else {
+ ref = filename;
+ }
+ print "<DT> <a href=\"" cgipath "/man2html?" fullname "\">";
+ textname = filename;
+ sub(/\.(gz|Z|z)$/, "", textname);
+ sub(/\./, "(", textname);
+ textname = textname ")";
+ print textname;
+ print "</A>";
+ }
+
+ text = substr(matchline, length(fullname) + 2);
+ tcount++;
+ if (tcount < truncate_at) {
+ sub(/^ *.[^ ]+ /, "", text);
+ sub(/ +$/, "", text);
+ gsub(/\\f./, "", text);
+ gsub(/\\&/, "", text);
+ gsub(/\\/, "", text);
+ print "<DD>" text;
+ }
+ else if (tcount == truncate_at) {
+ print "<DD> <I>...additional matches not shown.</I>";
+ }
+ }
+
+ print "</DL>";
+ if (mcount == 0) {
+ print "No matches found.";
+ }
+ else if (mcount == 1) {
+ print "<HR>\n<P>1 match found."
+ }
+ else {
+ print "<HR>\n<P>" mcount " matches found."
+ }
+ print "</BODY>";
+ print "</HTML>";
+ exit;
+}' "$@"
+
diff --git a/man2html/scripts/cgi-bin/man/mansearchhelp b/man2html/scripts/cgi-bin/man/mansearchhelp
new file mode 100755
index 0000000..a96e796
--- /dev/null
+++ b/man2html/scripts/cgi-bin/man/mansearchhelp
@@ -0,0 +1,32 @@
+#!/bin/sh
+MAN2HTML=/usr/bin/man2html
+MANSH=/home/httpd/cgi-aux/man/mansearchhelp.aux
+
+# Do we need lynxcgi URLs? For the moment our criterion is
+# 1) HTTP_USER_AGENT=Lynx* and 2) HTTP_HOST is unset.
+AGENT="${HTTP_USER_AGENT-unknown}"
+case "$AGENT" in
+ Lynx*|lynx*)
+ HH="${HTTP_HOST-nohh}"
+ ;;
+ *)
+ HH=nolynx
+ ;;
+esac
+
+SERVER="${SERVER_NAME-localhost}"
+case "$HH" in
+ nohh)
+ CG="lynxcgi:/home/httpd/cgi-bin/man"
+ ;;
+ *)
+ CG="http://$SERVER/cgi-bin/man"
+ ;;
+esac
+
+if [ -r $MANSH ]; then
+ sed s#%cg#$CG#g $MANSH
+else
+ $MAN2HTML -E "man2html: cannot open $MANSH"
+fi
+exit 0
diff --git a/man2html/scripts/cgi-bin/man/mansec b/man2html/scripts/cgi-bin/man/mansec
new file mode 100755
index 0000000..24c189e
--- /dev/null
+++ b/man2html/scripts/cgi-bin/man/mansec
@@ -0,0 +1,183 @@
+#!/usr/bin/awk -f
+#
+# Generate an index into a manual section by using find.
+# Michael Hamilton <michael@actrix.gen.nz>
+# Small changes - aeb, 980109
+#
+BEGIN {
+
+ OFS="";
+
+ if (ARGC != 3) {
+ print "Content-type: text/html\n\n";
+ print "<head>";
+ print "<title>mansec - bad call</title>";
+ print "</head>\n<body>";
+ print "mansec: wrong number of arguments";
+ print "</body>";
+ exit;
+ }
+ cgipath = ARGV[1];
+ section = ARGV[2];
+
+ if (section !~ /^[1-8ln]$/ && section != "all") {
+ print "Content-type: text/html\n\n";
+ print "<head>";
+ print "<title>Manual - Illegal section</title>";
+ print "</head>\n<body>";
+ print "Illegal section number '" section "'." ;
+ print "Must be one of 1,2,3,4,5,6,7,8,l,n or all";
+ print "</body>";
+ exit;
+ }
+
+ "echo $PPID" | getline pid;
+
+ if (cgipath ~ /lynxcgi/) {
+ cache_suffix = "l";
+ }
+ else {
+ cache_suffix = "h";
+ }
+
+ cache_dir = "/var/man2html";
+ cache_file = "manindex" cache_suffix "-" section ".html";
+ cache = cache_dir "/" cache_file;
+ cache_tmp = cache "_" pid;
+ buffer_tmp = cache "_items_" pid;
+
+ # Find out the man path
+ "man -w" | getline man_path
+ man_path = man_path ":";
+ gsub(":", " ", man_path);
+ # See if anything is out of date.
+ # Check all man[1-8] dir dates vs cache date
+ if (section == "all") {
+ if (system("test -f " cache) == 0) {
+ cmd = "find " man_path " -maxdepth 1 -name 'man[1-8]' -newer " cache;
+ cmd | getline need_update;
+ }
+ else {
+ need_update = 1;
+ }
+ }
+ else {
+ if (system("test -f " cache) == 0) {
+ cmd = "find " man_path " -maxdepth 1 -name man" section " -newer " cache;
+ cmd | getline need_update;
+ }
+ else {
+ need_update = 1;
+ }
+ }
+
+ if (need_update != "") {
+ if (system("test -w " cache_dir "/.") != 0) {
+ print "Content-type: text/html\n\n";
+ print "<head>";
+ print "<title>mansec - no cache</title>";
+ print "</head>\n<body>";
+ print "Sorry - cannot create index.";
+ print "No writable cache directory " cache_dir " exists.";
+ print "</body>";
+ exit;
+ }
+
+ sec_name[1] = "User Commands";
+ sec_name[2] = "System Calls";
+ sec_name[3] = "Library Functions";
+ sec_name[4] = "Special Files";
+ sec_name[5] = "File Formats";
+ sec_name[6] = "Games";
+ sec_name[7] = "Miscellany";
+ sec_name[8] = "Administration and Privileged Commands";
+ sec_name["all"] = "All available manual pages";
+ num_sections = 8;
+
+ # Print heading
+ print "Content-type: text/html\n\n" > cache_tmp;
+ print "<html>\n<head>" > cache_tmp;
+ print "<title>Manual Pages - Names: " section ". " sec_name[section] "</title>"> cache_tmp;
+ print "</head>\n<body>" > cache_tmp;
+ print "<h1>Manual Pages - Page Names</h1>" > cache_tmp;
+ print "<h2>Section " section ": " sec_name[section] "</h2>" > cache_tmp;
+
+ "hostname" | getline hostname;
+ "date" | getline date;
+ print hostname " (" date ")" > cache_tmp;
+
+ if (section != "all") {
+ sec_sub_dir = "/man" section;
+ }
+ else {
+ sec_sub_dir = "/man*";
+ }
+ gsub(" ", sec_sub_dir " ", man_path);
+
+ print "<p>Manual pages found under " man_path "." > cache_tmp;
+
+ # Find any man[1-8]/filenames
+ while ((("find " man_path " -follow -type f -printf '%f\n' | sort -f ") | getline manpage) > 0) {
+ # Check for new letter of alphabet
+ letter = tolower(substr(manpage,1,1));
+ if (letter != last_letter) {
+ last_letter = letter;
+ letter_index[++num_letters] = letter;
+ # Start a new alphabetic heading
+ print "<h2> <a name=\"", letter, "\">", toupper(letter), "</a></h2>" > buffer_tmp;
+ # Print out alphabetic quick index and other links
+ }
+ # Split page.n into "page" and "n" and generate an entry
+ sub(/[.]([zZ]|(gz))$/, "", manpage);
+ match(manpage, /[.][^.]+$/);
+ title = substr(manpage, 1, RSTART - 1);
+ if (section != "all") {
+ print "<a href=\"" cgipath "/man2html?", section, "+", title, "\">", title, "(", substr(manpage, RSTART + 1), ")</a>" > buffer_tmp;
+ }
+ else {
+ sec = substr(manpage, RSTART + 1)
+ print "<a href=\"" cgipath "/man2html?", sec, "+", title, "\">", title, "(", sec, ")</a>" > buffer_tmp;
+ }
+ }
+
+ close(buffer_tmp);
+
+ print "<p>" > cache_tmp;
+
+ # Print out alphabetic quick index and other links
+ for (i = 1; i <= num_letters; i++) {
+ print "<a href=\"#" letter_index[i] "\">" toupper(letter_index[i]) "</a>" > cache_tmp;
+ }
+
+ print "<p><hr>" > cache_tmp;
+ print "<a href=\"" cgipath "/man2html\">Return to Main Contents</a>" > cache_tmp;
+
+ print "<p>Other sections:" > cache_tmp;
+ for (i=1; i<=num_sections; i++) {
+ if (i != section) { # Don't print an entry for the section we are in
+ print "<a href=\"" cgipath "/mansec?" cgipath "+" i "\">" i ". " sec_name[i] "</a> " > cache_tmp;
+ }
+ }
+ print "<hr><p>" > cache_tmp;
+ # Print out the accumulated index entries
+ while ((getline < buffer_tmp) > 0) print > cache_tmp;
+ print "<hr><p>" > cache_tmp;
+ # Print out alphabetic quick index and other links
+ for (i = 1; i <= num_letters; i++) {
+ print "<a href=\"#" letter_index[i] "\">" toupper(letter_index[i]) "</a>" > cache_tmp;
+ }
+ print "<hr>" > cache_tmp;
+ print "<p><a href=\"" cgipath "/man2html\">Return to Main Contents</a>" > cache_tmp;
+ print "<p>Other sections:" > cache_tmp;
+ for (i=1; i<=num_sections; i++) {
+ if (i != section) { # Don't print an entry for the section we are in
+ print "<a href=\"" cgipath "/mansec?" cgipath "+" i "\">" i ". " sec_name[i] "</a> " > cache_tmp;
+ }
+ }
+ print "</body>\n</html>" > cache_tmp;
+ system("/bin/mv " cache_tmp " " cache);
+ system("/bin/rm -f " buffer_tmp);
+ }
+ system("/bin/cat " cache);
+ exit;
+}
diff --git a/man2html/scripts/cgi-bin/man/manwhatis b/man2html/scripts/cgi-bin/man/manwhatis
new file mode 100755
index 0000000..d14a516
--- /dev/null
+++ b/man2html/scripts/cgi-bin/man/manwhatis
@@ -0,0 +1,208 @@
+#!/usr/bin/awk -f
+#
+# Generate a whatis index into the manual pages by using find to
+# locate all the whatis files.
+# Michael Hamilton <michael@actrix.gen.nz>
+# Small changes - aeb, 980109
+#
+BEGIN {
+
+ OFS="";
+
+ if (ARGC != 3) {
+ print "Content-type: text/html\n\n";
+ print "<head>";
+ print "<title>manwhatis - bad call</title>";
+ print "</head>\n<body>";
+ print "manwhatis: wrong number of arguments";
+ print "</body>";
+ exit;
+ }
+ cgipath = ARGV[1];
+ section = ARGV[2];
+
+ if (section !~ /^[1-8ln]$/) {
+ print "Content-type: text/html\n\n";
+ print "<head>";
+ print "<title>Manual - Illegal section</title>";
+ print "</head>\n<body>";
+ print "Illegal section number '" section "'." ;
+ print "Must be one of 1,2,3,4,5,6,7,8,l,n";
+ print "</body>";
+ exit;
+ }
+
+ if (cgipath ~ /lynxcgi/) {
+ cache_suffix = "l";
+ }
+ else {
+ cache_suffix = "h";
+ }
+
+ cache_dir = "/var/man2html";
+ cache_file = "whatis" cache_suffix "-" section ".html";
+ cache = cache_dir "/" cache_file;
+
+
+ # Find out the man path
+ "man -w" | getline man_path
+ gsub(":", " ", man_path);
+ # See if anything is out of date.
+ if (system("test -f " cache) == 0) {
+ cmd = "find " man_path " -maxdepth 1 -name whatis -newer " cache;
+ cmd | getline need_update;
+ }
+ else {
+ need_update = 1;
+ }
+
+ if (need_update != "") {
+
+ if (system("test -w " cache_dir "/.") != 0) {
+ print "Content-type: text/html\n\n";
+ print "<head>";
+ print "<title>manwhatis - no cache</title>";
+ print "</head>\n<body>";
+ print "Sorry - cannot create index.";
+ print "No writable cache directory " cache_dir " exists.";
+ print "</body>";
+ exit;
+ }
+
+ "echo $PPID" | getline pid;
+
+ cache_tmp = cache "_" pid;
+ sort_tmp = cache_dir "/manwhatis_tmp_" pid ;
+ buffer_tmp = cache_dir "/manwhatis_tmp2_" pid;
+
+ sec_name[1] = "User Commands";
+ sec_name[2] = "System Calls";
+ sec_name[3] = "Library Functions";
+ sec_name[4] = "Special Files";
+ sec_name[5] = "File Formats";
+ sec_name[6] = "Games";
+ sec_name[7] = "Miscellany";
+ sec_name[8] = "Administration and Privileged Commands";
+ num_sections = 8;
+ # Print heading
+ print "Content-type: text/html\n\n" > cache_tmp;
+ print "<html>\n<head>" > cache_tmp;
+ print "<title>Manual Pages - Names and Descriptions: " section ". " sec_name[section] "</title>" > cache_tmp;
+
+ print "</head>\n<body>" > cache_tmp;
+ print "<h1>Manual Pages - Names and Descriptions</h1>" > cache_tmp;
+ print "<h1>Section " section ": " sec_name[section] "</h1>" > cache_tmp;
+ "hostname" | getline hostname;
+ "date" | getline date;
+ print hostname " (" date ")" > cache_tmp;
+ # Find out the man path
+ "man -w" | getline;
+ $1 = $1 ":";
+ gsub(":", " ", $1);
+
+ find_cmd = "find " man_path " -maxdepth 1 -name whatis -printf '%p '";
+ find_cmd | getline whatis_files;
+ close(find_cmd);
+
+ if (whatis_files == "") {
+ print "Content-type: text/html\n\n";
+ print "<head>";
+ print "<title>Manwhatis - Error updating index</title>";
+ print "</head>\n<body>";
+ print "Unable to find whatis files - Sorry."
+ print "</body>";
+ exit;
+ }
+ # Try to parse valid entries - those that contain ([0-9])
+ # Note that egrep is sometimes in /bin, sometimes in /usr/bin
+ extract_cmd = "egrep -h '\\(" section "[A-Za-z]*\\)' " whatis_files ;
+
+ print "<br>Manual pages referenced in " whatis_files "<p>" > cache_tmp;
+
+ # Note that sort sometimes lives in /bin and sometimes in /usr/bin
+ sort_cmd = "sort -f >> " sort_tmp;
+
+ while ( (extract_cmd | getline) > 0 ) {
+ if (bracket_pos = index($0, "(")) {
+ sec_full_num = substr($0, bracket_pos + 1, index($0, ")") - bracket_pos - 1);
+ names = substr($0, 1, bracket_pos - 2);
+ # Get rid of blanks and commas.
+ n = split(names, name_list, " *, *");
+ description = substr($0, bracket_pos + length(sec_full_num) + 2);
+ # Get rid of AT&T
+ gsub("&", "\\&amp;", description);
+ # Generate an entry for each name
+ for (i = 1; i <= n; i++) {
+ print name_list[i] " " sec_full_num " " name_list[1] " / " description | sort_cmd;
+ }
+ }
+ }
+ close(extract_cmd);
+ close(sort_cmd);
+
+ while ((getline < sort_tmp) > 0) {
+
+ letter = tolower(substr($1,1,1));
+ if (letter != last_letter) {
+ if (last_letter) {
+ print "</dl><p>" > buffer_tmp;
+ }
+ last_letter = letter;
+ letter_index[++num_letters] = letter;
+ # Terminate list, start a new one
+
+ print "<h2> <a name=\"", letter, "\">", toupper(letter), "</a></h2>\n<dl>" > buffer_tmp ;
+ }
+ # Generate a <dt> for the name
+ if ($3 != last_file || $1 != last_name) { # Don't repeat the same entry link.
+ print "<dt><a href=\"" cgipath "/man2html?", $2, "+", $3, "\">", $1, "(", $2, ")", "</a>" > buffer_tmp;
+ last_file = $3;
+ last_name = $1;
+ }
+ print "<dd>", substr($0, match($0, "/") + 1) > buffer_tmp;
+ }
+ # Finish off last list
+
+ print "\n</dl><p>" > buffer_tmp;
+ close(buffer_tmp);
+
+ system("/bin/rm " sort_tmp);
+
+ # Print out alphabetic quick index and other links
+ for (i = 1; i <= num_letters; i++) {
+ print "<a href=\"#" letter_index[i] "\">" toupper(letter_index[i]) "</a>" > cache_tmp;
+ }
+ print "<hr>" > cache_tmp;
+ print "<a href=\"" cgipath "/man2html\">Return to Main Contents</a>" > cache_tmp;
+
+ print "<p>Other sections:" > cache_tmp;
+ for (i=1; i<=num_sections; i++) {
+ if (i != section) { # Don't print an entry for the section we are in
+ print "<a href=\"" cgipath "/manwhatis?" cgipath "+" i "\">" i ". " sec_name[i] "</a> " > cache_tmp;
+ }
+ }
+ print "<hr><p>" > cache_tmp;
+ # Print out the accumulated contents entries
+ while ((getline < buffer_tmp) > 0) print > cache_tmp;
+ print "<hr><p>" > cache_tmp;
+
+ for (i = 1; i <= num_letters; i++) {
+ print "<a href=\"#" letter_index[i] "\">" toupper(letter_index[i]) "</a>" > cache_tmp;
+ }
+ print "<hr>" > cache_tmp;
+ print "<p><a href=\"" cgipath "/man2html\">Return to Main Contents</a>" > cache_tmp;
+
+ print "<p>Other sections:" > cache_tmp;
+ for (i=1; i<=num_sections; i++) {
+ if (i != section) { # Don't print an entry for the section we are in
+ print "<a href=\"" cgipath "/manwhatis?" cgipath "+" i "\">" i ". " sec_name[i] "</a> " > cache_tmp;
+ }
+ }
+ print "</body>" > cache_tmp;
+ print "</html>" > cache_tmp;
+ system("/bin/mv " cache_tmp " " cache);
+ system("/bin/rm " buffer_tmp);
+ }
+ system("/bin/cat " cache);
+ exit;
+}
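
For reference, a minimal sketch of how these endpoints might be exercised once the scripts are installed (the hostname and page names below are only illustrative; per the CGI convention, a query string containing no "=" is split on "+" and passed to the script as positional arguments, which is what the scripts above rely on):

  # format a page, with and without an explicit section (man2html script)
  lynx -dump 'http://localhost/cgi-bin/man/man2html?1+ls'
  lynx -dump 'http://localhost/cgi-bin/man/man2html?ls'
  # per-section name index and whatis index; the first argument is the cgi path prefix
  lynx -dump 'http://localhost/cgi-bin/man/mansec?http://localhost/cgi-bin/man+1'
  lynx -dump 'http://localhost/cgi-bin/man/manwhatis?http://localhost/cgi-bin/man+1'
  # glimpse full-text search (mansearch script)
  lynx -dump 'http://localhost/cgi-bin/man/mansearch?socket'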