diff options
| author | Ralph Amissah <ralph.amissah@gmail.com> | 2021-06-27 16:59:44 -0400 | 
|---|---|---|
| committer | Ralph Amissah <ralph.amissah@gmail.com> | 2021-08-01 07:46:58 -0400 | 
| commit | 4d5ee2ead874c7a436faa8754eb99215927ea94d (patch) | |
| tree | c1519c0b8db2302bcf46ffdce04a6580d7a012e5 /misc/util | |
| parent | org-mode code block headers (diff) | |
dir rename ./sundry (from ./misc)
Diffstat (limited to 'misc/util')
| -rw-r--r-- | misc/util/d/cgi/search/README | 11 | ||||
| -rw-r--r-- | misc/util/d/cgi/search/dub.sdl | 16 | ||||
| -rw-r--r-- | misc/util/d/cgi/search/src/spine_cgi_sqlite_search.d | 963 | ||||
| -rw-r--r-- | misc/util/d/tools/markup_conversion/README | 1 | ||||
| -rwxr-xr-x | misc/util/d/tools/markup_conversion/endnotes_inline_from_binary.d | 123 | ||||
| -rw-r--r-- | misc/util/d/tools/markup_conversion/markup_changes.d | 136 | ||||
| -rwxr-xr-x | misc/util/d/tools/markup_conversion/markup_changes_header_and_content.d | 244 | ||||
| -rwxr-xr-x | misc/util/d/tools/markup_conversion/markup_conversion_from_sisu_ruby_to_sisu_spine.d | 367 | ||||
| -rwxr-xr-x | misc/util/d/tools/spine_scaffold.d | 134 | ||||
| -rwxr-xr-x | misc/util/rb/cgi/spine.search.cgi | 952 | ||||
| -rwxr-xr-x | misc/util/rb/tex/dr_tex.rb | 120 | 
11 files changed, 0 insertions, 3067 deletions
| diff --git a/misc/util/d/cgi/search/README b/misc/util/d/cgi/search/README deleted file mode 100644 index eb8fcde..0000000 --- a/misc/util/d/cgi/search/README +++ /dev/null @@ -1,11 +0,0 @@ -change db name to match name of db you create -cv.db_selected = "spine.search.sql.db"; - -~dr/bin/spine-ldc -v --sqlite-db-create --sqlite-db-filename="spine.search.db" --cgi-sqlite-search-filename="spine-search" --output=/var/www ~grotto/repo/git.repo/code/project-spine/doc-reform-markup/markup_samples/markup/pod/* - -~dr/bin/spine-ldc -v  --sqlite-update --sqlite-db-filename="spine.search.db" --output=/var/www ~grotto/repo/git.repo/code/project-spine/doc-reform-markup/markup_samples/markup/pod/* - -cd util/d/cgi/search/src -dub --force --compiler=ldc2 && sudo cp -v cgi-bin/spine-search /usr/lib/cgi-bin/. - -http://localhost/cgi-bin/spine-search? diff --git a/misc/util/d/cgi/search/dub.sdl b/misc/util/d/cgi/search/dub.sdl deleted file mode 100644 index 30b076d..0000000 --- a/misc/util/d/cgi/search/dub.sdl +++ /dev/null @@ -1,16 +0,0 @@ -name "spine_search" -description "A minimal D application." 
-authors "ralph" -copyright "Copyright © 2021, ralph" -license "GPL-3.0+" -dependency "d2sqlite3" version="~>0.18.3" -dependency "arsd-official": "7.2.0" -  subConfiguration "arsd-official:cgi" "cgi" -targetType "executable" -targetPath "./cgi-bin" -mainSourceFile "src/spine_cgi_sqlite_search.d" -configuration "default" { -  targetType "executable" -  targetName "spine-search" -  postGenerateCommands "notify-send -t 0 'D executable ready' 'spine cgi sqlite search d'" -} diff --git a/misc/util/d/cgi/search/src/spine_cgi_sqlite_search.d b/misc/util/d/cgi/search/src/spine_cgi_sqlite_search.d deleted file mode 100644 index 987c319..0000000 --- a/misc/util/d/cgi/search/src/spine_cgi_sqlite_search.d +++ /dev/null @@ -1,963 +0,0 @@ -/+ -- Name: Spine, Doc Reform [a part of] -  - Description: documents, structuring, processing, publishing, search -    - static content generator - -  - Author: Ralph Amissah -    [ralph.amissah@gmail.com] - -  - Copyright: (C) 2015 - 2021 Ralph Amissah, All Rights -    Reserved. - -  - License: AGPL 3 or later: - -    Spine (SiSU), a framework for document structuring, publishing and -    search - -    Copyright (C) Ralph Amissah - -    This program is free software: you can redistribute it and/or modify it -    under the terms of the GNU AFERO General Public License as published by the -    Free Software Foundation, either version 3 of the License, or (at your -    option) any later version. - -    This program is distributed in the hope that it will be useful, but WITHOUT -    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -    FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -    more details. - -    You should have received a copy of the GNU General Public License along with -    this program. If not, see [https://www.gnu.org/licenses/]. 
- -    If you have Internet connection, the latest version of the AGPL should be -    available at these locations: -    [https://www.fsf.org/licensing/licenses/agpl.html] -    [https://www.gnu.org/licenses/agpl.html] - -  - Spine (by Doc Reform, related to SiSU) uses standard: -    - docReform markup syntax -      - standard SiSU markup syntax with modified headers and minor modifications -    - docReform object numbering -      - standard SiSU object citation numbering & system - -  - Hompages: -    [https://www.doc_reform.org] -    [https://www.sisudoc.org] - -  - Git -    [https://git.sisudoc.org/projects/?p=software/spine.git;a=summary] - -+/ -/+ dub.sdl -  name "spine search" -  description "spine cgi search" -+/ -import std.format; -import std.range; -import std.regex; -import arsd.cgi; -import d2sqlite3; -import std.process : environment; -void cgi_function_intro(Cgi cgi) { -  string header; -  string table; -  string form; -  struct Config { -    string http_request_type; -    string http_host; -    // string server_name; -    string doc_root; -    string cgi_root; -    string cgi_script; -    string data_path_html; -    string db_path; -    string query_string; -    string http_url; -    string request_method; -  } -  auto conf = Config(); -  conf.http_request_type    = environment.get("REQUEST_SCHEME",        "http"); -  conf.http_host            = environment.get("HTTP_HOST",             "localhost"); -  // conf.server_name          = environment.get("SERVER_NAME",           "localhost"); -  conf.doc_root             = environment.get("DOCUMENT_ROOT",         "/var/www/html"); -  conf.cgi_root             = environment.get("CONTEXT_DOCUMENT_ROOT", "/usr/lib/cgi-bin/"); -  // conf.cgi_script           = environment.get("SCRIPT_NAME",           "/cgi-bin/spine-search"); -  conf.query_string         = environment.get("QUERY_STRING",          ""); -  conf.http_url             = environment.get("HTTP_REFERER",          conf.http_request_type ~ "://" ~ 
conf.http_host ~ conf.cgi_script ~ "?" ~ conf.query_string); -  conf.db_path              = "/var/www/html/sqlite/";                 // conf.http_host ~ "/sqlite/"; -  conf.request_method       = environment.get("REQUEST_METHOD",        "POST"); -  struct CGI_val { -    string db_selected      = ""; -    string sql_match_limit  = "";     // radio: ( 1000 | 2500 ) -    string sql_match_offset = ""; -    string search_text      = ""; -    string results_type     = "";     // index -    bool   checked_echo     = false; -    bool   checked_stats    = false; -    bool   checked_url      = false; -    bool   checked_searched = false; -    bool   checked_tip      = false; -    bool   checked_sql      = false; -  } -  auto cv = CGI_val(); -  cv.db_selected = "spine.search.db"; // config, set db name -  auto text_fields() { -    string canned_query_str = environment.get("QUERY_STRING", ""); -    if ("query_string" in cgi.post) { -      canned_query_str = environment.get("QUERY_STRING", ""); -    } -    string[string] canned_query; -    if (conf.request_method == "POST") { -    } else if (conf.request_method == "GET") { -      foreach (pair_str; canned_query_str.split("&")) { -        // cgi.write(pair_str ~ "<br>"); -        string[] pair = pair_str.split("="); -        canned_query[pair[0]] = pair[1]; -      } -      // foreach (field, content; canned_query) { -      //   cgi.write(field ~ ": " ~ content ~ "<br>"); -      // } -    } -    static struct Rgx { -      // static canned_query   = ctRegex!(`\A(?P<matched>.+)\Z`,                            "m"); -      static search_text_area  = ctRegex!(`\A(?P<matched>.+)\Z`,                            "m"); -      // static fulltext       = ctRegex!(`\A(?P<matched>.+)\Z`,                            "m"); -      static line              = ctRegex!(`^(?P<matched>.+?)(?: ~|$)`,                      "m"); -      static text              = ctRegex!(`(?:^|\s~\s*)text:\s+(?P<matched>.+?)(?: ~|$)`,   "m"); -      static author          
  = ctRegex!(`(?:^|\s~\s*)author:\s+(?P<matched>.+)$`,         "m"); -      static title             = ctRegex!(`(?:^|\s~\s*)title:\s+(?P<matched>.+)$`,          "m"); -      static uid               = ctRegex!(`(?:^|\s~\s*)uid:\s+(?P<matched>.+)$`,            "m"); -      static fn                = ctRegex!(`(?:^|\s~\s*)fn:\s+(?P<matched>.+)$`,             "m"); -      static keywords          = ctRegex!(`(?:^|\s~\s*)keywords:\s+(?P<matched>.+)$`,       "m"); -      static topic_register    = ctRegex!(`(?:^|\s~\s*)topic_register:\s+(?P<matched>.+)$`, "m"); -      static subject           = ctRegex!(`(?:^|\s~\s*)subject:\s+(?P<matched>.+)$`,        "m"); -      static description       = ctRegex!(`(?:^|\s~\s*)description:\s+(?P<matched>.+)$`,    "m"); -      static publisher         = ctRegex!(`(?:^|\s~\s*)publisher:\s+(?P<matched>.+)$`,      "m"); -      static editor            = ctRegex!(`(?:^|\s~\s*)editor:\s+(?P<matched>.+)$`,         "m"); -      static contributor       = ctRegex!(`(?:^|\s~\s*)contributor:\s+(?P<matched>.+)$`,    "m"); -      static date              = ctRegex!(`(?:^|\s~\s*)date:\s+(?P<matched>.+)$`,           "m"); -      static results_type      = ctRegex!(`(?:^|\s~\s*)type:\s+(?P<matched>.+)$`,           "m"); -      static format            = ctRegex!(`(?:^|\s~\s*)format:\s+(?P<matched>.+)$`,         "m"); -      static identifier        = ctRegex!(`(?:^|\s~\s*)identifier:\s+(?P<matched>.+)$`,     "m"); -      static source            = ctRegex!(`(?:^|\s~\s*)source:\s+(?P<matched>.+)$`,         "m"); -      static language          = ctRegex!(`(?:^|\s~\s*)language:\s+(?P<matched>.+)$`,       "m"); -      static relation          = ctRegex!(`(?:^|\s~\s*)relation:\s+(?P<matched>.+)$`,       "m"); -      static coverage          = ctRegex!(`(?:^|\s~\s*)coverage:\s+(?P<matched>.+)$`,       "m"); -      static rights            = ctRegex!(`(?:^|\s~\s*)rights:\s+(?P<matched>.+)$`,         "m"); -      static comment           = 
ctRegex!(`(?:^|\s~\s*)comment:\s+(?P<matched>.+)$`,        "m"); -      // static abstract_         = ctRegex!(`(?:^|\s~\s*)abstract:\s+(?P<matched>.+)$`,       "m"); -      static src_filename_base = ctRegex!(`^src_filename_base:\s+(?P<matched>.+)$`,         "m"); -    } -    struct searchFields { -      string canned_query      = ""; // GET  canned_query     == cq -      string search_text_area  = ""; // POST search_text_area == tsa -      string text              = ""; // text              == txt -      string author            = ""; // author            == au -      string title             = ""; // title             == ti -      string uid               = "";  // uid               == uid -      string fn                = ""; // fn                == fn -      string keywords          = ""; // keywords          == kw -      string topic_register    = ""; // topic_register    == tr -      string subject           = ""; // subject           == su -      string description       = ""; // description       == de -      string publisher         = ""; // publisher         == pb -      string editor            = ""; // editor            == ed -      string contributor       = ""; // contributor       == ct -      string date              = ""; // date              == dt -      string format            = ""; // format            == fmt -      string identifier        = ""; // identifier        == id -      string source            = ""; // source            == src sfn -      string language          = ""; // language          == lng -      string relation          = ""; // relation          == rl -      string coverage          = ""; // coverage          == cv -      string rights            = ""; // rights            == rgt -      string comment           = ""; // comment           == cmt -      // string abstract          = ""; -      string src_filename_base = ""; // src_filename_base == bfn -      string results_type      = ""; // results_type      == rt     radio - 
     string sql_match_limit   = ""; // sql_match_limit   == sml    radio -      string sql_match_offset  = ""; // sql_match_offset  == smo -      string stats             = ""; // stats             == sts    checked -      string echo              = ""; // echo              == ec     checked -      string url               = ""; // url               == url    checked -      string searched          = ""; // searched          == se     checked -      string sql               = ""; // sql               == sql    checked -    } -    auto rgx  = Rgx(); -    auto got  = searchFields(); -    if (environment.get("REQUEST_METHOD", "POST") == "POST") { -      if ("sf" in cgi.post) { -        got.search_text_area =  cgi.post["sf"]; -        if (auto m = got.search_text_area.matchFirst(rgx.text)) { -          got.text = m["matched"]; -          got.canned_query ~= "sf=" ~ m["matched"]; -        } else if (auto m = got.search_text_area.matchFirst(rgx.line)) { -          if ( -            !(m["matched"].matchFirst(rgx.author)) -            && !(m["matched"].matchFirst(rgx.title)) -          ) { -            got.text = m["matched"]; -            got.canned_query ~= "sf=" ~ m["matched"]; -          } -        } -        if (auto m = got.search_text_area.matchFirst(rgx.author)) { -          got.author = m["matched"]; -          got.canned_query ~= "&au=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.title)) { -          got.title = m["matched"]; -          got.canned_query ~= "&ti=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.uid)) { -          got.uid = m["matched"]; -          got.canned_query ~= "&uid=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.fn)) { -          got.fn = m["matched"]; -          got.canned_query ~= "&fn=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.keywords)) { -          got.keywords = m["matched"]; 
-          got.canned_query ~= "&kw=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.topic_register)) { -          got.topic_register = m["matched"]; -          got.canned_query ~= "&tr=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.subject)) { -          got.subject = m["matched"]; -          got.canned_query ~= "&su=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.description)) { -          got.description = m["matched"]; -          got.canned_query ~= "&de=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.publisher)) { -          got.publisher = m["matched"]; -          got.canned_query ~= "&pb=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.editor)) { -          got.editor = m["matched"]; -          got.canned_query ~= "&ed=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.contributor)) { -          got.contributor = m["matched"]; -          got.canned_query ~= "&ct=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.date)) { -          got.date = m["matched"]; -          got.canned_query ~= "&dt=" ~ m["matched"]; -        } -        // if (auto m = got.search_text_area.matchFirst(rgx.results_type)) { -        //   got.results_type = m["matched"]; -        //   got.canned_query ~= "&rt=" ~ m["matched"]; -        // } -        if (auto m = got.search_text_area.matchFirst(rgx.format)) { -          got.format = m["matched"]; -          got.canned_query ~= "&fmt=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.identifier)) { -          got.identifier = m["matched"]; -          got.canned_query ~= "&id=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.source)) { -          got.source = m["matched"]; -          got.canned_query ~= 
"&src=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.language)) { -          got.language = m["matched"]; -          got.canned_query ~= "&lng=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.relation)) { -          got.relation = m["matched"]; -          got.canned_query ~= "&rl=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.coverage)) { -          got.coverage = m["matched"]; -          got.canned_query ~= "&cv=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.rights)) { -          got.rights = m["matched"]; -          got.canned_query ~= "&rgt=" ~ m["matched"]; -        } -        if (auto m = got.search_text_area.matchFirst(rgx.comment)) { -          got.comment = m["matched"]; -          got.canned_query ~= "&cmt=" ~ m["matched"]; -        } -        // if (auto m = search_text_area.matchFirst(rgx.abstract)) { -        //   got.abstract = m["matched"]; -        // } -        if (auto m = got.search_text_area.matchFirst(rgx.src_filename_base)) { -          got.src_filename_base = m["matched"]; -          got.canned_query ~= "&bfn=" ~ m["matched"]; -        } -      } -      if ("fn" in cgi.post) { -        got.fn =  cgi.post["fn"]; -        got.canned_query ~= "&fn=" ~ cgi.post["fn"]; -      } -      if ("rt" in cgi.post) { -        got.results_type =  cgi.post["rt"]; -        got.canned_query ~= "&rt=" ~ cgi.post["rt"]; -      } -      if ("sts" in cgi.post) { -        got.stats =  cgi.post["sts"]; -        got.canned_query ~= "&sts=" ~ cgi.post["sts"]; -      } -      if ("ec" in cgi.post) { -        got.echo =  cgi.post["ec"]; -        got.canned_query ~= "&ec=" ~ cgi.post["ec"]; -      } -      if ("url" in cgi.post) { -        got.url =  cgi.post["url"]; -        got.canned_query ~= "&url=" ~ cgi.post["url"]; -      } -      if ("se" in cgi.post) { -        got.searched =  cgi.post["se"]; -        
got.canned_query ~= "&se=" ~ cgi.post["se"]; -      } -      if ("sql" in cgi.post) { -        got.sql =  cgi.post["sql"]; -        got.canned_query ~= "&sql=" ~ cgi.post["sql"]; -      } -      if ("sml" in cgi.post) { -        got.sql_match_limit =  cgi.post["sml"]; -        got.canned_query ~= "&sml=" ~ cgi.post["sml"]; -      } -      if ("smo" in cgi.post) { -        got.sql_match_offset = "0";   // cgi.post["smo"]; -        got.canned_query ~= "&smo=0"; //  ~ cgi.post["smo"]; -      } -      got.canned_query = got.canned_query.strip.split(" ").join("%20"); -      conf.query_string = got.canned_query; -      // cgi.write("f.canned_query: " ~ got.canned_query ~ "<br>"); -    } else if (environment.get("REQUEST_METHOD", "POST") == "GET") { -      got.canned_query = environment.get("QUERY_STRING", ""); -      // cgi.write("f.canned_query: " ~ got.canned_query ~ "<br>"); -      got.search_text_area = ""; -      if ("sf" in canned_query && !(canned_query["sf"]).empty) { -        got.text = canned_query["sf"].split("%20").join(" "); -        got.search_text_area ~= "text: " ~ got.text ~ "\n"; -      } -      if ("au" in canned_query && !(canned_query["au"]).empty) { -        got.author = canned_query["au"].split("%20").join(" "); -        got.search_text_area ~= "author: " ~ got.author ~ "\n"; -      } -      if ("ti" in canned_query && !(canned_query["ti"]).empty) { -        got.title = canned_query["ti"].split("%20").join(" "); -        got.search_text_area ~= "title: " ~ got.title ~ "\n"; -      } -      if ("uid" in canned_query && !(canned_query["uid"]).empty) { -        got.uid = canned_query["uid"].split("%20").join(" "); -        got.search_text_area ~= "uid: " ~ got.uid ~ "\n"; -      } -      if ("fn" in canned_query && !(canned_query["fn"]).empty) { -        got.fn = canned_query["fn"].split("%20").join(" "); -        got.search_text_area ~= "fn: " ~ got.fn ~ "\n"; -      } -      if ("kw" in canned_query && !(canned_query["kw"]).empty) { -        
got.keywords = canned_query["kw"].split("%20").join(" "); -        got.search_text_area ~= "keywords: " ~ got.keywords ~ "\n"; -      } -      if ("tr" in canned_query && !(canned_query["tr"]).empty) { -        got.topic_register = canned_query["tr"].split("%20").join(" "); -        got.search_text_area ~= "topic_register: " ~ got.topic_register ~ "\n"; -      } -      if ("su" in canned_query && !(canned_query["su"]).empty) { -        got.subject = canned_query["su"].split("%20").join(" "); -        got.search_text_area ~= "subject: " ~ got.subject ~ "\n"; -      } -      if ("de" in canned_query && !(canned_query["de"]).empty) { -        got.description = canned_query["de"].split("%20").join(" "); -        got.search_text_area ~= "description: " ~ got.description ~ "\n"; -      } -      if ("pb" in canned_query && !(canned_query["pb"]).empty) { -        got.publisher = canned_query["pb"].split("%20").join(" "); -        got.search_text_area ~= "publisher: " ~ got.publisher ~ "\n"; -      } -      if ("ed" in canned_query && !(canned_query["ed"]).empty) { -        got.editor = canned_query["ed"].split("%20").join(" "); -        got.search_text_area ~= "editor: " ~ got.editor ~ "\n"; -      } -      if ("ct" in canned_query && !(canned_query["ct"]).empty) { -        got.contributor = canned_query["ct"].split("%20").join(" "); -        got.search_text_area ~= "contributor: " ~ got.contributor ~ "\n"; -      } -      if ("dt" in canned_query && !(canned_query["dt"]).empty) { -        got.date = canned_query["dt"].split("%20").join(" "); -        got.search_text_area ~= "date: " ~ got.date ~ "\n"; -      } -      if ("rt" in canned_query && !(canned_query["rt"]).empty) { -        got.results_type = canned_query["rt"].split("%20").join(" "); -        // got.search_text_area ~= "results_type: " ~ got.results_type ~ "\n"; -      } -      if ("fmt" in canned_query && !(canned_query["fmt"]).empty) { -        got.format = canned_query["fmt"].split("%20").join(" "); -        
got.search_text_area ~= "format: " ~ got.format ~ "\n"; -      } -      if ("id" in canned_query && !(canned_query["id"]).empty) { -        got.identifier = canned_query["id"].split("%20").join(" "); -        got.search_text_area ~= "identifier: " ~ got.identifier ~ "\n"; -      } -      if ("src" in canned_query && !(canned_query["src"]).empty) { -        got.source = canned_query["src"].split("%20").join(" "); -        got.search_text_area ~= "source: " ~ got.source ~ "\n"; -      } -      if ("lng" in canned_query && !(canned_query["lng"]).empty) { -        got.language = canned_query["lng"].split("%20").join(" "); -        got.search_text_area ~= "language: " ~ got.language ~ "\n"; -      } -      if ("rl" in canned_query && !(canned_query["rl"]).empty) { -        got.relation = canned_query["rl"].split("%20").join(" "); -        got.search_text_area ~= "relation: " ~ got.relation ~ "\n"; -      } -      if ("cv" in canned_query && !(canned_query["cv"]).empty) { -        got.coverage = canned_query["cv"].split("%20").join(" "); -        got.search_text_area ~= "coverage: " ~ got.coverage ~ "\n"; -      } -      if ("rgt" in canned_query && !(canned_query["rgt"]).empty) { -        got.rights = canned_query["rgt"].split("%20").join(" "); -        got.search_text_area ~= "rights: " ~ got.rights ~ "\n"; -      } -      if ("cmt" in canned_query && !(canned_query["cmt"]).empty) { -        got.comment = canned_query["cmt"].split("%20").join(" "); -        got.search_text_area ~= "comment: " ~ got.comment ~ "\n"; -      } -      // if ("abstract" in canned_query && !(canned_query["abstract"]).empty) { -      //   got.abstract = canned_query["abstract"]; -      // } -      if ("bfn" in canned_query && !(canned_query["bfn"]).empty) { // search_field -        got.src_filename_base = canned_query["bfn"].split("%20").join(" "); -        got.search_text_area ~= "src_filename_base: " ~ got.src_filename_base ~ "\n"; -      } -      if ("sml" in canned_query && 
!(canned_query["sml"]).empty) { -        got.sql_match_limit = canned_query["sml"].split("%20").join(" "); -        // got.search_text_area ~= "sql_match_limit: " ~ got.sql_match_limit ~ "\n"; -      } -      // cgi.write("f.search_text_area: " ~ got.search_text_area ~ "<br>"); -    } -    return got; -  } -  auto tf = text_fields; // -  struct SQL_select { -    string the_body         = ""; -    string the_range        = ""; -  } -  auto sql_select = SQL_select(); -  string base                  ; // = ""; -  string tip                   ; // = ""; -  string search_note           ; // = ""; -  uint   sql_match_offset_count   = 0; -  string previous_next () { -    static struct Rgx { -      static track_offset = ctRegex!(`(?P<offset_key>[&]smo=)(?P<offset_val>[0-9]+)`, "m"); -    } -    auto rgx = Rgx(); -    string _previous_next = ""; -    int    _current_offset_value = 0; -    string _set_offset_next = ""; -    string _set_offset_previous = ""; -    string _url = ""; -    string _url_previous = ""; -    string _url_next = ""; -    string arrow_previous = ""; -    string arrow_next = ""; -    if (environment.get("REQUEST_METHOD", "POST") == "POST") { -      _url = conf.http_request_type ~ "://" ~ conf.http_host ~ conf.cgi_script ~ "?" ~ tf.canned_query; -    } else if (environment.get("REQUEST_METHOD", "POST") == "GET") { -      _url = conf.http_request_type ~ "://" ~ conf.http_host ~ conf.cgi_script ~ "?" 
~ environment.get("QUERY_STRING", ""); -    } -    if (auto m = _url.matchFirst(rgx.track_offset)) { -      _current_offset_value = m.captures["offset_val"].to!int; -      _set_offset_next = m.captures["offset_key"] ~ ((m.captures["offset_val"]).to!int + cv.sql_match_limit.to!int).to!string; -      _url_next = _url.replace(rgx.track_offset, _set_offset_next); -      if (_current_offset_value < cv.sql_match_limit.to!int) { -        _url_previous = ""; -      } else { -        _url_previous = ""; -        _set_offset_previous = m.captures["offset_key"] ~ ((m.captures["offset_val"]).to!int - cv.sql_match_limit.to!int).to!string; -        _url_previous = _url.replace(rgx.track_offset, _set_offset_previous); -      } -    } else {// _current_offset_value = 0; -      _url_next = _url ~= "&smo=" ~ cv.sql_match_limit.to!string; -    } -    if (_url_previous.empty) { -      arrow_previous = ""; -    } else { -      arrow_previous = -        "<font size=\"2\" color=\"#666666\">" -        ~ "<a href=\"" -        ~ _url_previous -        ~ "\">" -        ~ "<< prev" -        ~ "</a> || </font>"; -    } -    arrow_next = -      "<font size=\"2\" color=\"#666666\">" -      ~ "<a href=\"" -      ~ _url_next -      ~ "\">" -      ~ "next >>" -      ~ "</a></font>"; -    _previous_next = "<hr>" ~ arrow_previous ~ arrow_next; -    return _previous_next; -  } -  { -    header = format(q"┃ -<!DOCTYPE html> -<html> -<head> -  <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> -  <title> -    SiSU spine search form (sample) -  </title> -  <meta name="sourcefile" content="SiSU.sst" /> -  <link rel="generator" href="sisudoc.org" /> -  <link rel="shortcut icon" href="https://%s/image_sys/spine.ico" /> -  <style media = "all"> -   *{ -    padding                  : 0px; -    margin                   : 2px; -  } -  body { -    height                   : 100vh; -    background-color         : #FFFFFF; -  } -  body { -    color                    : #000000; -    background   
            : #FFFFFF; -    background-color         : #FFFFFF; -  } -  a:link { -    color                    : #003399; -    text-decoration          : none; -  } -  a:visited { -    color                    : #003399; -    text-decoration          : none; -  } -  a:hover { -    color                    : #000000; -    background-color         : #F9F9AA; -  } -  a.lnkocn:link { -    color                    : #777777; -    text-decoration          : none; -  } -  a.lnkocn:visited { -    color                    : #32CD32; -    text-decoration          : none; -  } -  a.lnkocn:hover { -    color                    : #777777; -    font-size                : 15px; -  } -  a:hover img { -    background-color         : #FFFFFF; -  } -  a:active { -    color                    : #003399; -    text-decoration          : underline; -  } -  hr { -    width                    : 100%%; -    margin-left              : 0%%; -    margin-right             : 0em; -    margin-top               : 0.5em; -    margin-bottom            : 0.5em; -  } -</style> -</head> -<body lang="en" xml:lang="en"> -┃", -    conf.http_host, -   ); -  } -  { -    table = format(q"┃ - <table summary="band" border="0" cellpadding="2" cellspacing="0"> - <tr><td width="20%%"> -  <table summary="home button / home information" border="0" cellpadding="2" cellspacing="0"> -  <tr><td align="left"> -   <br /><a href="https://sisudoc.org/" target="_top"> -     <b>SiSU</b> -   </a> -   <br /><a href="https://git.sisudoc.org/" target="_top"> -     git -   </a> -  </td></tr> -  </table> - </td> - <td> -   <label for="find"><b>SiSU spine (generated) search form</b></label> - </td></tr> - </table> - ┃"); -  } -  { -    string post_value(string field_name, string type="box", string set="on") { -      string val = ""; -      switch (type) { -      case "field": -        val = ((field_name in cgi.post && !(cgi.post[field_name]).empty) -          ? 
cgi.post[field_name] -          : (field_name in cgi.get) -            ? cgi.get[field_name] -            : ""); -        val = tf.search_text_area; -        break; -      case "box": // generic for checkbox or radio; checkbox set == "on" radio set == "name set" -        val = ((field_name in cgi.post && !(cgi.post[field_name]).empty) -          ? (cgi.post[field_name]  == set ? "checked" : "off") -          : (field_name in cgi.get) -            ? (cgi.get[field_name] == set ? "checked" : "off") -            :                               "off"); -        break; -      case "radio": // used generic bo -        val = ((field_name in cgi.post && !(cgi.post[field_name]).empty) -          ? (cgi.post[field_name]  == set ? "checked" : "off") -          : (field_name in cgi.get) -            ? (cgi.get[field_name] == set ? "checked" : "off") -            :                               "checked"); -        break; -      case "checkbox": // used generic bo -        val = ((field_name in cgi.post && !(cgi.post[field_name]).empty) -          ? (cgi.post[field_name]  == set ? "checked" : "off") -          : (field_name in cgi.get) -            ? (cgi.get[field_name] == set ? "checked" : "off") -            :                               "checked"); -        break; -      default: -      } -      return val; -    } -    string the_can(string fv) { -      string show_the_can = post_value("url"); -      string _the_can = ""; -      if (show_the_can == "checked") { -        tf = text_fields; -        string method_get_url            = conf.http_request_type ~ "://" ~ conf.http_host ~ conf.cgi_script ~ "?" ~ environment.get("QUERY_STRING", ""); -        string method_post_url_construct = conf.http_request_type ~ "://" ~ conf.http_host ~ conf.cgi_script ~ "?" ~ tf.canned_query; -        // assert(method_get_url == environment.get("HTTP_REFERER", conf.http_request_type ~ "://" ~ conf.http_host ~ conf.cgi_script ~ "?" 
~ conf.query_string)); -        if (conf.request_method == "POST") { -          _the_can = -            "<font size=\"2\" color=\"#666666\">" -            ~ "POST: " -            ~ "<a href=\"" -            ~ method_post_url_construct -            ~ "\">" -            ~ method_post_url_construct -            ~ "</a></font>" -            ~ "<br>"; -        } else if (conf.request_method == "GET") { -          _the_can = -            "<font size=\"2\" color=\"#666666\">" -            ~ "GET:  " -            ~ "<a href=\"" -            ~ method_get_url -            ~ "\">" -            ~ method_get_url -            ~ "</a></font>"; -        } -        conf.http_url = conf.http_request_type ~ "://" ~ conf.http_host ~ conf.cgi_script ~ tf.canned_query; -      } -      return _the_can; -    } -    string provide_tip() { -      string searched_tip = post_value("se"); -      string tip = ""; -      if (searched_tip == "checked") { -        string search_field = post_value("sf", "field"); -        tf = text_fields; -        tip = format(q"┃ -<font size="2" color="#666666"> -<b>database:</b> <font size="2" color="#004000">%s</font>; <b>selected view:</b> <font size="2" color="#004000">index</font> -<b>search string:</b> %s %s %s %s %s %s<br /> -%s %s %s %s %s %s -</font> -┃", -          cv.db_selected, -          (tf.text.empty   ? "" : "\"text:   <font size=\"2\" color=\"#004000\">" ~ tf.text   ~ "</font>; "), -          (tf.title.empty  ? "" : "\"title:  <font size=\"2\" color=\"#004000\">" ~ tf.title  ~ "</font>; "), -          (tf.author.empty ? "" : "\"author: <font size=\"2\" color=\"#004000\">" ~ tf.author ~ "</font>; "), -          (tf.date.empty   ? "" : "\"date    <font size=\"2\" color=\"#004000\">" ~ tf.date   ~ "</font>; "), -          (tf.uid.empty    ? "" : "\"uid:    <font size=\"2\" color=\"#004000\">" ~ tf.uid    ~ "</font>; "), -          (tf.fn.empty     ? 
"" : "\"fn:     <font size=\"2\" color=\"#004000\">" ~ tf.fn     ~ "</font>; "), -          (tf.text.empty   ? "" :  "text:    <font size=\"2\" color=\"#004000\">" ~ tf.text   ~ "</font><br />"), -          (tf.title.empty  ? "" : "title:    <font size=\"2\" color=\"#004000\">" ~ tf.title  ~ "</font><br />"), -          (tf.author.empty ? "" :  "author:  <font size=\"2\" color=\"#004000\">" ~ tf.author ~ "</font><br />"), -          (tf.date.empty   ? "" :  "date:    <font size=\"2\" color=\"#004000\">" ~ tf.date   ~ "</font><br />"), -          (tf.uid.empty    ? "" : "\"uid:    <font size=\"2\" color=\"#004000\">" ~ tf.uid    ~ "</font>; "), -          (tf.fn.empty     ? "" : "\"fn:     <font size=\"2\" color=\"#004000\">" ~ tf.fn     ~ "</font>; "), -         ); -      } -      return tip; -    } -    form = format(q"┃ -<form action="%s" id="SubmitForm" method="post" accept-charset="UTF-8"> -  <table cellpadding="2"> -  <tr><td valign=\"top\"> -      <textarea id="find" name="sf" type="text" rows="6" cols="40" maxlength="256" wrap="virtual">%s</textarea> -  </td> -  <td valign=\"top\"> -    %s -    %s -    %s -  </td></tr></table> -  <td valign=\"top\"><tr><td> -    <font size="2" color="#222222"> -    <b>to search:</b> select which database to search (drop-down menu below); enter your search query (in the form above); and <b>click on the search button</b> (below) -    <br /> -    <select name="db" size="1"> -      %s -      <option value="%s">spine</option> -    </select> -    <input type="submit" value="SiSU search" /> -    <input type="radio" name="rt" id="results_type_index" value="idx" %s> index -    <input type="radio" name="rt" id="results_type_text" value="txt" %s> text / grep; -    match limit: -    <input type="radio" name="sml" id="sql_match_limit_1000" value="1000" %s> 1,000 -    <input type="radio" name="sml" id="sql_match_limit_2500" value="2500" %s> 2,500 -    <br /> -      <input type="checkbox" name="ec" %s> echo query -      <input 
type="checkbox" name="sts" %s> result stats -      <input type="checkbox" name="url" %s> search url -      <input type="checkbox" name="se" %s> searched -      <input type="checkbox" name="tip" %s> available fields -      <input type="checkbox" name="sql" %s> sql statement -      <input type="hidden" name="smo" value="0"> -    <br /> -    </font> -  </td></tr> -  </table> -</form> -┃", -      "spine-search", -      (post_value("ec") == "checked") ? post_value("sf", "field") : "", -      provide_tip, -      search_note, -      the_can(post_value("sf", "field")), -      cv.db_selected, -      cv.db_selected, -      post_value("rt",  "box", "idx"), -      post_value("rt",  "box", "txt"), -      post_value("sml", "box", "1000"), -      post_value("sml", "box", "2500"), -      post_value("ec"), -      post_value("sts"), -      post_value("url"), -      post_value("se"), -      post_value("tip"), -      post_value("sql"), -    ); -    { -      string set_value(string field_name, string default_val) { -        string val; -        if (field_name in cgi.post) { -          val = cgi.post[field_name]; -        } else if (field_name in cgi.get) { -          val = cgi.get[field_name]; -        } else { val = default_val; } -        return val; -      } -      bool set_bool(string field_name) { -        bool val; -        if (field_name in cgi.post -        && cgi.post[field_name] == "on") { -          val = true; -        } else if (field_name in cgi.get -        && cgi.get[field_name] == "on") { -          val = true; -        } else { val = false; } -        return val; -      } -      cv.db_selected      = set_value("selected_db",      "spine.search.db"); // selected_db == db -      cv.sql_match_limit  = set_value("sml", "1000"); -      cv.sql_match_offset = set_value("smo", "0"); -      cv.search_text      = set_value("sf",  "test");                         // remove test -      cv.results_type     = set_value("rt",  "idx"); -      cv.checked_echo     = set_bool("ec"); -   
   cv.checked_stats    = set_bool("sts"); -      cv.checked_url      = set_bool("url"); -      cv.checked_searched = set_bool("se"); -      cv.checked_tip      = set_bool("tip"); -      cv.checked_sql      = set_bool("sql"); -      tf = text_fields; -    } -  } -  { -    cgi.write(header); -    cgi.write(table); -    cgi.write(form); -    // cgi.write(previous_next); -    { // debug environment -      // foreach (k, d; environment.toAA) { -      //   cgi.write(k ~ ": " ~ d ~ "<br>"); -      // } -    } -    { // debug cgi info -      // cgi.write("db_selected: "         ~ cv.db_selected ~ "<br>\n"); -      // cgi.write("search_text: "         ~ cv.search_text ~ "<br>\n"); -      // cgi.write("sql_match_limit: "     ~ cv.sql_match_limit ~ ";\n"); -      // cgi.write("sql_match_offset: "    ~ cv.sql_match_offset ~ ";\n"); -      // cgi.write("results_type: "        ~ cv.results_type ~ "<br>\n"); -      // cgi.write("cv.checked_echo: "     ~ (cv.checked_echo ? "checked" : "off") ~ "; \n"); -      // cgi.write("cv.checked_stats: "    ~ (cv.checked_stats ? "checked" : "off") ~ "; \n"); -      // cgi.write("cv.checked_url: "      ~ (cv.checked_url ? "checked" : "off") ~ "; \n"); -      // cgi.write("cv.checked_searched: " ~ (cv.checked_searched ? "checked" : "off") ~ ";<br>\n"); -      // cgi.write("cv.checked_tip: "      ~ (cv.checked_tip ? "checked" : "off") ~ "; \n"); -      // cgi.write("cv.checked_sql: "      ~ (cv.checked_sql ? 
"checked" : "off") ~ "<br>\n"); -    } -  } -  auto db = Database(conf.db_path ~ cv.db_selected); -  { -    uint sql_match_offset_counter(T)(T cv) { -      sql_match_offset_count += cv.sql_match_limit.to!uint; -      return sql_match_offset_count; -    } -    void sql_search_query() { -      string select_field_like(string db_field, string search_field) { -        string where_ = ""; -        if (!(search_field.empty)) { -          string _sf = search_field.strip.split("%20").join(" "); -          if (_sf.match(r" OR ")) { -            _sf = _sf.split(" OR ").join("%' OR " ~ db_field ~ " LIKE '%"); -          } -          if (_sf.match(r" AND ")) { -            _sf = _sf.split(" AND ").join("%' AND " ~ db_field ~ " LIKE '%"); -          } -          _sf = "( " ~ db_field ~ " LIKE\n  '%" ~ _sf ~ "%' )"; -          where_ ~= format(q"┃ -  %s -┃", -            _sf -          ); -        } -        return where_; -      } -      string[] _fields; -      _fields ~= select_field_like("doc_objects.clean",                         tf.text); -      _fields ~= select_field_like("metadata_and_text.title",                   tf.title); -      _fields ~= select_field_like("metadata_and_text.creator_author",          tf.author); -      _fields ~= select_field_like("metadata_and_text.uid",                     tf.uid); -      _fields ~= select_field_like("metadata_and_text.src_filename_base",       tf.fn); -      _fields ~= select_field_like("metadata_and_text.src_filename_base",       tf.src_filename_base); -      _fields ~= select_field_like("metadata_and_text.language_document_char",  tf.language); -      _fields ~= select_field_like("metadata_and_text.date_published",          tf.date); -      _fields ~= select_field_like("metadata_and_text.classify_keywords",       tf.keywords); -      _fields ~= select_field_like("metadata_and_text.classify_topic_register", tf.topic_register); -      string[] fields; -      foreach (f; _fields) { -        if (!(f.empty)) { fields ~= f; } -     
 } -      string fields_str = ""; -      fields_str ~= fields.join(" AND "); -      sql_select.the_body ~= format(q"┃ -SELECT -  metadata_and_text.uid, -  metadata_and_text.title, -  metadata_and_text.creator_author_last_first, -  metadata_and_text.creator_author, -  metadata_and_text.src_filename_base, -  metadata_and_text.language_document_char, -  metadata_and_text.date_published, -  metadata_and_text.classify_keywords, -  metadata_and_text.classify_topic_register, -  doc_objects.body, -  doc_objects.seg_name, -  doc_objects.ocn, -  metadata_and_text.uid -FROM -  doc_objects, -  metadata_and_text -WHERE ( -  %s -  ) -AND -  doc_objects.uid_metadata_and_text = metadata_and_text.uid -ORDER BY -  metadata_and_text.creator_author_last_first, -  metadata_and_text.date_published DESC, -  metadata_and_text.title, -  metadata_and_text.language_document_char, -  metadata_and_text.src_filename_base, -  doc_objects.ocn -LIMIT %s OFFSET %s -;┃", -        fields_str, -        cv.sql_match_limit, -        cv.sql_match_offset, -      ); -      (cv.checked_sql) -      ? cgi.write(previous_next ~ "<hr><font size=\"2\" color=\"#666666\">" ~ sql_select.the_body.split("\n  ").join(" ").split("\n").join("<br>") ~ "</font>\n") -      : ""; -      cgi.write(previous_next); -      auto select_query_results = db.execute(sql_select.the_body).cached; -      string _old_uid = ""; -      if (!select_query_results.empty) { -        foreach (row; select_query_results) { -          if (row["uid"].as!string != _old_uid) { -            _old_uid = row["uid"].as!string; -            auto m = (row["date_published"].as!string).match(regex(r"^([0-9]{4})")); // breaks if row missing or no match? 
-            cgi.write( -              "<hr><a href=\"" -                ~ "https://" ~ conf.http_host ~ "/" -                ~ row["language_document_char"].as!string ~ "/html/" -                ~ row["src_filename_base"].as!string ~ "/" -                ~ "toc.html" -              ~ "\">\"" -                ~ row["title"].as!string ~ "\"" -              ~ "</a> (" -              ~ m.hit -              ~ ") " -              ~ "[" -              ~ row["language_document_char"].as!string -              ~ "] " -              ~ row["creator_author_last_first"].as!string -              ~ ":<br>\n" -            ); -          } -          if (cv.results_type == "txt") { -            cgi.write( -              "<hr><a href=\"" -                ~ "https://" ~ conf.http_host ~ "/" -                ~ row["language_document_char"].as!string ~ "/html/" -                ~ row["src_filename_base"].as!string ~ "/" -                ~ row["seg_name"].as!string ~ ".html#" ~ row["ocn"].as!string -              ~ "\">" -                ~ row["ocn"].as!string -              ~ "</a>" -              ~ "<br>" -              ~ row["body"].as!string -            ); -          } else { -            cgi.write( -              "<a href=\"" -                ~ "https://" ~ conf.http_host ~ "/" -                ~ row["language_document_char"].as!string ~ "/html/" -                ~ row["src_filename_base"].as!string ~ "/" -                ~ row["seg_name"].as!string ~ ".html#" ~ row["ocn"].as!string -              ~ "\">" -                ~ row["ocn"].as!string -              ~ "</a>, " -            ); -          } -        } -        cgi.write( previous_next); -      } else { // offset_not_beyond_limit = false; -        cgi.write("select_query_results empty<p>\n"); -      } -    } -    sql_search_query; -  } -  { -    db.close; -  } -  { -    string tail = format(q"┃ -</body> -┃"); -    cgi.write(tail); -  } -} -mixin GenericMain!cgi_function_intro; diff --git 
a/misc/util/d/tools/markup_conversion/README b/misc/util/d/tools/markup_conversion/README deleted file mode 100644 index 8b13789..0000000 --- a/misc/util/d/tools/markup_conversion/README +++ /dev/null @@ -1 +0,0 @@ - diff --git a/misc/util/d/tools/markup_conversion/endnotes_inline_from_binary.d b/misc/util/d/tools/markup_conversion/endnotes_inline_from_binary.d deleted file mode 100755 index b084052..0000000 --- a/misc/util/d/tools/markup_conversion/endnotes_inline_from_binary.d +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/env rdmd -/+ -  - read in file .sst .ssi .ssm -  - loop twice -    - first -      - check for and skip code blocks -      - use unique code marker for endnote markers in text and give an endnote -        number ★1, increment -      - extract all endnotes in array -    - second -      - check that the footnote marker number count matches the number of notes -        in the array -        - if they match either: -            - substitute each endnote marker with the array footnote[number-1] -            - substitute each endnote marker with footnote -              as inlined footnote markup (footnote number not needed) -        - if they do not match exit -  - check whether changes have been made -    - if so write file with inline footnotes in sub-directory converted_output_/ -      using the same name as the original file -    - else, exit -+/ -import std.stdio; -import std.file; -import std.array : split; -import std.exception; -import core.stdc.errno; -import std.regex; -import std.format; -import std.conv; -void main(string[] args) { -  static comment                 = ctRegex!(`^%+ `); -  static block_tic_code_open     = ctRegex!("^`{3} code(?:[.](?P<syntax>[a-z][0-9a-z#+_]+))?(?:[(](?P<attrib>[ a-zA-Z0-9;:,]*)[)])?"); -  static block_tic_close         = ctRegex!("^(`{3})$","m"); -  static block_curly_code_open   = ctRegex!(`^(?:code(?:[.](?P<syntax>[a-z][0-9a-z_]+))?(?:[(](?P<attrib>[ a-zA-Z0-9;:,]*)[)])?[{][ ]*$)`); -  static 
block_curly_code_close  = ctRegex!(`^([}]code)`); -  auto rgx_endnote_ref           = ctRegex!(`([~]\^)(?P<tail>[)\]]? |$)`, "gm"); -  auto rgx_endnote               = ctRegex!(`^\^~\s+(.+|\n)`, "gm"); -  foreach(arg; args[1..$]) { -    if ( -      !(arg.match(regex(r"--\w+"))) -      && arg.match(regex(r"\w+?\.ss[itm]")) -    ) { -      writeln(arg); -      string filename                  = arg; -      try { -        string[] contents, endnotes, endnote_refs; -        string text                    = filename.readText; -        string[] paragraphs            = text.split("\n\n"); -        int endnote_ref_count          = 0; -        int code_block_status          = 0; -        enum codeBlock { off, curly, tic, } -        foreach (paragraph; paragraphs) { /+ loop to gather binary endnotes +/ -          if (code_block_status == codeBlock.off -            && paragraph.match(rgx_endnote) -          ) { -            endnotes ~= replaceAll!(m => m[1]) -              (paragraph, rgx_endnote); -          } else { -            if ((code_block_status == codeBlock.curly -                && paragraph.matchFirst(block_curly_code_close)) -              || ((code_block_status == codeBlock.tic -                && paragraph.matchFirst(block_tic_close)) -            ) { -              code_block_status = codeBlock.off; -            } else if ( type["curly_code"] == 1 || type["tic_code"] == 1) { -              // skip, prevent search for endnotes -            } else if (paragraph.matchFirst(block_curly_code_open)) { -              code_block_status = codeBlock.curly; -            } else if (paragraph.matchFirst(block_tic_code_open)) { -              code_block_status = codeBlock.tic; -            } else if (auto m = paragraph.matchAll(rgx_endnote_ref)) { -              foreach (n; m) { -                endnote_ref_count++; // endnote_refs ~= (n.captures[1]); -              } -            } -            contents ~= paragraph; -          } -        } -        if (endnotes.length == 
endnote_ref_count) { -          import std.outbuffer; -          writeln("endnote ref count:         ", endnote_ref_count); -          writeln("number of binary endnotes: ", endnotes.length); -          int endnote_count = -1; -          auto buffer = new OutBuffer(); -          foreach (content; contents) { /+ loop to inline endnotes +/ -            content = replaceAll!(m => "~{ " ~ endnotes[++endnote_count] ~ " }~" ~ m["tail"] ) -              (content, rgx_endnote_ref); -            buffer.write(content ~ "\n\n"); -          } -          if (buffer) { -            try { -              string dir_out = "converted_output_"; -              string path_and_file_out = dir_out ~ "/" ~ filename; -              dir_out.mkdirRecurse; -              auto f = File(path_and_file_out, "w"); -              f.write(buffer); -              writeln("wrote: ", path_and_file_out); -            } catch (FileException ex) { -              writeln("did not write file"); -              // Handle errors -            } -          } -        } else { -          writeln("ERROR binary endnote mismatch, check markup,\nmisatch in the number of endnotes & endnote references!"); -          writeln("  number of endnotes:     ", endnotes.length); -          writeln("  number of endnote refs: ", endnote_ref_count); // endnote_refs.length, -        } -        // assert(endnotes.length == endnote_ref_count); -      } catch (ErrnoException ex) { -        switch(ex.errno) { -          case EPERM: -          case EACCES: // Permission denied -            break; -          case ENOENT: // File does not exist -            break; -          default:     // Handle other errors -            break; -        } -      } -    } -  } -} diff --git a/misc/util/d/tools/markup_conversion/markup_changes.d b/misc/util/d/tools/markup_conversion/markup_changes.d deleted file mode 100644 index 4274f78..0000000 --- a/misc/util/d/tools/markup_conversion/markup_changes.d +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/env 
rdmd -/+ -  - read in file .sst .ssi .ssm -  - loop twice -    - first -      - check for and skip code blocks -      - use unique code marker for endnote markers in text and give an endnote -        number ★1, increment -      - extract all endnotes in array -    - second -      - check that the footnote marker number count matches the number of notes -        in the array -        - if they match either: -            - substitute each endnote marker with the array footnote[number-1] -            - substitute each endnote marker with footnote -              as inlined footnote markup (footnote number not needed) -        - if they do not match exit -  - check whether changes have been made -    - if so write file with inline footnotes in sub-directory converted_output_/ -      using the same name as the original file -    - else, exit -+/ -import std.stdio; -import std.file; -import std.array : split; -import std.exception; -import core.stdc.errno; -import std.regex; -import std.format; -import std.conv; -void main(string[] args) { -  static comment                 = ctRegex!(`^%+ `); -  static block_tic_code_open     = ctRegex!("^`{3} code(?:[.](?P<syntax>[a-z][0-9a-z#+_]+))?(?:[(](?P<attrib>[ a-zA-Z0-9;:,]*)[)])?"); -  static block_tic_close         = ctRegex!("^(`{3})$","m"); -  static block_curly_code_open   = ctRegex!(`^(?:code(?:[.](?P<syntax>[a-z][0-9a-z_]+))?(?:[(](?P<attrib>[ a-zA-Z0-9;:,]*)[)])?[{][ ]*$)`); -  static block_curly_code_close  = ctRegex!(`^([}]code)`); -  auto rgx_endnote_ref           = ctRegex!(`([~]\^)(?P<tail>[)\]]? 
|$)`, "gm"); -  auto rgx_endnote               = ctRegex!(`^\^~\s+(.+|\n)`, "gm"); -  foreach(arg; args[1..$]) { -    if ( -      !(arg.match(regex(r"--\w+"))) -      && arg.match(regex(r"\w+?\.ss[itm]")) -    ) { -      writeln(arg); -      string filename                  = arg; -      try { -        string[] contents, endnotes, endnote_refs; -        string text                    = filename.readText; -        string[] paragraphs            = text.split("\n\n"); -        int endnote_ref_count          = 0; -        int[string] type = [ -          "curly_code"                 : 0, -          "tic_code"                   : 0, -        ]; -        foreach (paragraph; paragraphs) { /+ loop to gather binary endnotes +/ -          if ( !( type["curly_code"] == 1 || type["tic_code"] == 1) -            && paragraph.match(rgx_endnote) -          ) { -            endnotes ~= replaceAll!(m => m[1]) -              (paragraph, rgx_endnote); -          } else { -            if ( type["curly_code"] == 1 || type["tic_code"] == 1 -              || paragraph.matchFirst(block_curly_code_open) -              || paragraph.matchFirst(block_tic_code_open) -            ) { /+ code blocks identified, no munging +/ -              if ( type["curly_code"] == 1 -                && paragraph.matchFirst(block_curly_code_close) -              ) { -                type["curly_code"] = 0; -              } else if (type["tic_code"] == 1 -                && paragraph.matchFirst(block_tic_close) -              ) { -                type["tic_code"] = 0; -              } else if (paragraph.matchFirst(block_curly_code_open)) { -                type["curly_code"] = 1; -              } else if (paragraph.matchFirst(block_tic_code_open)) { -                type["tic_code"] = 1; -              } -              contents ~= paragraph; -            } else { /+ regular content, not a code block +/ -              if (auto m = paragraph.matchAll(rgx_endnote_ref)) { -                foreach (n; m) { -            
      endnote_ref_count++; // endnote_refs ~= (n.captures[1]); -                } -              } -              paragraph = replaceAll!(m => " \\\\ " ) -                (paragraph, regex(r"\s*<(?:/\s*|:)?br>\s*")); // (paragraph, regex(r"(<br>)")); -              contents ~= paragraph; -            } -          } -        } -        { -          import std.outbuffer; -          auto buffer = new OutBuffer(); -          if (endnotes.length == endnote_ref_count) { -            // writeln("endnote ref count:         ", endnote_ref_count); -            // writeln("number of binary endnotes: ", endnotes.length); -            int endnote_count = -1; -            foreach (content; contents) { /+ loop to inline endnotes +/ -              content = replaceAll!(m => "~{ " ~ endnotes[++endnote_count] ~ " }~" ~ m["tail"] ) -                (content, rgx_endnote_ref); // endnote_ref cannot occur in a code block or else fail -              buffer.write(content ~ "\n\n"); -            } -            if (buffer) { -              try { -                string dir_out = "converted_output_"; -                string path_and_file_out = dir_out ~ "/" ~ filename; -                dir_out.mkdirRecurse; -                auto f = File(path_and_file_out, "w"); -                f.write(buffer); -                writeln("wrote: ", path_and_file_out); -              } catch (FileException ex) { -                writeln("did not write file"); -                // Handle errors -              } -            } -          } else { -            foreach (content; contents) { /+ loop to inline endnotes +/ -              buffer.write(content ~ "\n\n"); -            } -          } -        } -      } catch (ErrnoException ex) { -        switch(ex.errno) { -          case EPERM: -          case EACCES: // Permission denied -            break; -          case ENOENT: // File does not exist -            break; -          default:     // Handle other errors -            break; -        } -      } -    } - 
 } -} diff --git a/misc/util/d/tools/markup_conversion/markup_changes_header_and_content.d b/misc/util/d/tools/markup_conversion/markup_changes_header_and_content.d deleted file mode 100755 index 86792ff..0000000 --- a/misc/util/d/tools/markup_conversion/markup_changes_header_and_content.d +++ /dev/null @@ -1,244 +0,0 @@ -#!/usr/bin/env rdmd -/+ -  - read in file .sst .ssi .ssm -  - loop twice -    - first -      - check for and skip code blocks -      - use unique code marker for endnote markers in text and give an endnote -        number ★1, increment -      - extract all endnotes in array -    - second -      - check that the footnote marker number count matches the number of notes -        in the array -        - if they match either: -            - substitute each endnote marker with the array footnote[number-1] -            - substitute each endnote marker with footnote -              as inlined footnote markup (footnote number not needed) -        - if they do not match exit -  - check whether changes have been made -    - if so write file with inline footnotes in sub-directory converted_output_/ -      using the same name as the original file -    - else, exit -+/ -import std.stdio; -import std.file; -import std.array : split; -import std.exception; -// import std.range; -import core.stdc.errno; -import std.regex; -import std.format; -import std.conv; -void main(string[] args) { -  static heading_a               = ctRegex!(`^:?[A][~] `, "m"); -  static comment                 = ctRegex!(`^%+ `); -  static block_tic_code_open     = ctRegex!("^`{3} code(?:[.](?P<syntax>[a-z][0-9a-z#+_]+))?(?:[(](?P<attrib>[ a-zA-Z0-9;:,]*)[)])?"); -  static block_tic_close         = ctRegex!("^(`{3})$","m"); -  static block_curly_code_open   = ctRegex!(`^(?:code(?:[.](?P<syntax>[a-z][0-9a-z_]+))?(?:[(](?P<attrib>[ a-zA-Z0-9;:,]*)[)])?[{][ ]*$)`); -  static block_curly_code_close  = ctRegex!(`^([}]code)`); -  auto rgx_endnote_ref           = ctRegex!(`([~]\^)(?P<tail>[)\]]? 
|$)`, "gm"); -  auto rgx_endnote               = ctRegex!(`^\^~\s+(.+|\n)`, "gm"); -  char[][] header0Content1(in string src_text) { // cast(char[]) -    /+ split string on _first_ match of "^:?A~\s" into [header, content] array/tuple +/ -    char[][] header_and_content; -    auto m = (cast(char[]) src_text).matchFirst(heading_a); -    header_and_content ~= m.pre; -    header_and_content ~= m.hit ~ m.post; -    assert(header_and_content.length == 2, -      "document markup is broken, header body split == " -      ~ header_and_content.length.to!string -      ~ "; (header / body array split should == 2 (split is on level A~))" -    ); -    return header_and_content; -  } -  foreach(arg; args[1..$]) { -    if ( -      !(arg.match(regex(r"--\w+"))) -      && arg.match(regex(r"\w+?\.ss[itm]")) -    ) { -      writeln(arg); -      string filename                  = arg; -      try { -        string[] munged_header, munged_contents, munged_endnotes, endnote_refs; -        string text                    = filename.readText; -        char[][] hc                    = header0Content1(text); -        char[] src_header              = hc[0]; -        string[] headers               = src_header.to!string.split("\n\n"); -        char[] src_txt                 = hc[1]; -        string[] paragraphs            = src_txt.to!string.split("\n\n"); -        int endnote_ref_count          = 0; -        int[string] type = [ -          "curly_code"                 : 0, -          "tic_code"                   : 0, -        ]; -        string _tmp_header; -        foreach (h_; headers) {                                                                                          /+ loop to inline endnotes +/ -          _tmp_header = ""; -          if (h_.match(regex(r"^[@\[]?title[:\]]?"))) {                                                                  // title -            if (auto m = h_.match(regex(r"^@title:(?:\s+(?P<c>.+)|$)"))) {                                               // sisu 
bespoke markup -              if (m.captures["c"].length == 0) { -                _tmp_header ~= "title:"; -              } else { -                _tmp_header ~= "title:\n  main: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } -            } else if (auto m = h_.match(regex(r"^title\s*=\s*(?P<c>.+)"))) {                                            // toml? -              if (m.captures["c"].length == 0) { -                _tmp_header ~= "title:"; -              } else { -                _tmp_header ~= "title:\n  main: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } -            } else if (auto m = h_.match(regex(r"^\[title\]"))) {                                                        // toml markup -              _tmp_header ~= "title:"; -            } else if (auto m = h_.match(regex(r"^title(?:\s+(?P<c>.+)|\s+\\$)"))) {                                     // sdlang markup -              if (m.captures["c"].length == 0) { -                _tmp_header ~= "title:"; -              } else { -                _tmp_header ~= "title:\n  main: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } -            } -            if (h_.match(regex(r"^\s*[:]?(?:main)[:= ]?", "m"))) { -              if (auto m = h_.match(regex(r"^\s+(?P<h>:main):(?:\s+(?P<c>.+)|$)", "m"))) {                                // sisu bespoke markup -                _tmp_header ~= "  main: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } else if (auto m = h_.match(regex(r"^\s*(?P<h>main)\s*=\s*(?P<c>.+)", "m"))) {                            // toml? 
-                _tmp_header ~= "  main: " ~ m.captures["c"]; -              } else if (auto m = h_.match(regex(r"^\s+(?P<h>main)(?:\s*\s*(?P<c>.+)|$)", "m"))) {                       // toml markup -                _tmp_header ~= "  main: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } else if (auto m = h_.match(regex(r"^\s+(?P<h>main)(?:\s+(?P<c>.+)|\s+\\$)", "m"))) {                     // sdlang markup -                _tmp_header ~= "  main: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } -            } -            if (h_.match(regex(r"^\s*[:]?(?:sub(title)?)[:= ]?", "m"))) { -              if (auto m = h_.match(regex(r"^\s+:sub(?:title)?:(?:\s+(?P<c>.+)|$)", "m"))) {                             // sisu bespoke markup -                _tmp_header ~= "  subtitle: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } else if (auto m = h_.match(regex(r"^\s*sub(?:title)?\s*=\s*(?P<c>.+)$", "m"))) {                         // toml? -                _tmp_header ~= "  subtitle: " ~ m.captures["c"]; -              } else if (auto m = h_.match(regex(r"^\s+(?:title)?(?:\s*\s*(?P<c>.+)|$)", "m"))) {                        // toml markup -                _tmp_header ~= "  subtitle: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } else if (auto m = h_.match(regex(r"^\s+(?:title)?(?:\s+(?P<c>.+)|\s+\\$)", "m"))) {                      // sdlang markup -                _tmp_header ~= "  subtitle: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } -            } -          } -          if (h_.match(regex(r"^[@\[]?rights[:\]]?"))) {                                                                 // rights -            if (auto m = h_.match(regex(r"^@rights:[ ]+(?P<c>.+)$"))) {                                                  // sisu bespoke markup -              _tmp_header ~= "rights: \n  copyright: \"" ~ m.captures["c"] ~ "\""; -            } else if (auto m = h_.match(regex(r"^@rights:"))) {                                                         // sisu bespoke 
markup -              _tmp_header ~= "rights:"; -            } else if (auto m = h_.match(regex(r"^\[rights\]", "m"))) {                                                  // toml markup -              _tmp_header ~= "rights:"; -            } else if (auto m = h_.match(regex(r"^rights:"))) {                                                          // sdlang markup -              _tmp_header ~= "rights:"; -            } -            if (h_.match(regex(r"^\s*[:]?copyright[:= ]?", "m"))) { -              if (auto m = h_.match(regex(r"^\s+:copyright:(?:\s+(?P<c>.+)|$)", "m"))) {                                 // sisu bespoke markup -                _tmp_header ~= "  copyright: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } else if (auto m = h_.match(regex(r"^\s*copyright\s*=\s*(?P<c>.+)", "m"))) {                              // toml? -                _tmp_header ~= "  copyright: " ~ m.captures["c"]; -              } else if (auto m = h_.match(regex(r"^\s+<h>copyright(?:\s*\s*(?P<c>.+)|$)", "m"))) {                      // toml markup -                _tmp_header ~= "  copyright: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } else if (auto m = h_.match(regex(r"^\s+copyright(?:\s+(?P<c>.+)|\s+\\$)", "m"))) {                       // sdlang markup -                _tmp_header ~= "  copyright: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } -            } -            if (h_.match(regex(r"^\s*[:]?licen[cs]e[:= ]?", "m"))) { -              if (auto m = h_.match(regex(r"^\s+:licen[cs]e:(?:\s+(?P<c>.+)|$)", "m"))) {                                // sisu bespoke markup -                _tmp_header ~= "  license: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } else if (auto m = h_.match(regex(r"^\s*licen[cs]e\s*=\s*(?P<c>.+)$", "m"))) {                            // toml? 
-                _tmp_header ~= "  license: " ~ m.captures["c"]; -              } else if (auto m = h_.match(regex(r"^\s+licen[cs]e(?:\s*\s*(?P<c>.+)|$)", "m"))) {                        // toml markup -                _tmp_header ~= "  license: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } else if (auto m = h_.match(regex(r"^\s+licen[cs]e(?:\s+(?P<c>.+)|\s+\\$)", "m"))) {                      // sdlang markup -                _tmp_header ~= "  license: " ~ "\"" ~ m.captures["c"] ~ "\""; -              } -            } -          } -          if (_tmp_header.length > 0) { -            munged_header ~= _tmp_header; -          } else { -            munged_header ~= h_; -          } -        } -        writeln(munged_header); -        foreach (paragraph; paragraphs) {                                                                                  /+ loop to gather binary endnotes +/ -          if ( !( type["curly_code"] == 1 || type["tic_code"] == 1) -            && paragraph.match(rgx_endnote) -          ) { -            munged_endnotes ~= replaceAll!(m => m[1]) -              (paragraph, rgx_endnote); -          } else { -            if ( type["curly_code"] == 1 || type["tic_code"] == 1 -              || paragraph.matchFirst(block_curly_code_open) -              || paragraph.matchFirst(block_tic_code_open) -            ) { /+ code blocks identified, no munging +/ -              if ( type["curly_code"] == 1 -                && paragraph.matchFirst(block_curly_code_close) -              ) { -                type["curly_code"] = 0; -              } else if (type["tic_code"] == 1 -                && paragraph.matchFirst(block_tic_close) -              ) { -                type["tic_code"] = 0; -              } else if (paragraph.matchFirst(block_curly_code_open)) { -                type["curly_code"] = 1; -              } else if (paragraph.matchFirst(block_tic_code_open)) { -                type["tic_code"] = 1; -              } -              munged_contents ~= 
paragraph; -            } else { /+ regular content, not a code block +/ -              if (auto m = paragraph.matchAll(rgx_endnote_ref)) { -                foreach (n; m) { -                  endnote_ref_count++; // endnote_refs ~= (n.captures[1]); -                } -              } -              paragraph = replaceAll!(m => " \\\\ " ) -                (paragraph, regex(r"\s*<(?:/\s*|:)?br>\s*")); // (paragraph, regex(r"(<br>)")); -              munged_contents ~= paragraph; -            } -          } -        } -        { -          import std.outbuffer; -          auto buffer = new OutBuffer(); -          foreach (header; munged_header) { /+ loop to inline endnotes +/ -            buffer.write(header ~ "\n\n"); -          } -          if (munged_endnotes.length == endnote_ref_count) { -            int endnote_count = -1; -            foreach (content; munged_contents) { /+ loop to inline endnotes +/ -              content = replaceAll!(m => "~{ " ~ munged_endnotes[++endnote_count] ~ " }~" ~ m["tail"] ) -                (content, rgx_endnote_ref); // endnote_ref cannot occur in a code block or else fail -              buffer.write(content ~ "\n\n"); -            } -            if (buffer) { -              try { -                string dir_out = "converted_output_"; -                string path_and_file_out = dir_out ~ "/" ~ filename; -                dir_out.mkdirRecurse; -                auto f = File(path_and_file_out, "w"); -                f.write(buffer); -                // writeln("wrote: ", path_and_file_out); -              } catch (FileException ex) { -                writeln("did not write file"); -                // Handle errors -              } -            } -          } else { -            foreach (content; munged_contents) { /+ loop to inline endnotes +/ -              buffer.write(content ~ "\n\n"); -            } -          } -        } -      } catch (ErrnoException ex) { -        switch(ex.errno) { -          case EPERM: -          case 
EACCES: // Permission denied -            break; -          case ENOENT: // File does not exist -            break; -          default:     // Handle other errors -            break; -        } -      } -    } -  } -} diff --git a/misc/util/d/tools/markup_conversion/markup_conversion_from_sisu_ruby_to_sisu_spine.d b/misc/util/d/tools/markup_conversion/markup_conversion_from_sisu_ruby_to_sisu_spine.d deleted file mode 100755 index 0ec541d..0000000 --- a/misc/util/d/tools/markup_conversion/markup_conversion_from_sisu_ruby_to_sisu_spine.d +++ /dev/null @@ -1,367 +0,0 @@ -#!/usr/bin/env rdmd -/+ -  - read in file .sst .ssi .ssm -  - loop twice -    - first -      - check for and skip code blocks -      - use unique code marker for endnote markers in text and give an endnote -        number ★1, increment -      - extract all endnotes in array -    - second -      - check that the footnote marker number count matches the number of notes -        in the array -        - if they match either: -            - substitute each endnote marker with the array footnote[number-1] -            - substitute each endnote marker with footnote -              as inlined footnote markup (footnote number not needed) -        - if they do not match exit -  - check whether changes have been made -    - if so write file with inline footnotes in sub-directory converted_output_/ -      using the same name as the original file -    - else, exit -+/ -import std.stdio; -import std.file; -import std.array : split, join; -import std.exception; -// import std.range; -import core.stdc.errno; -import std.regex; -import std.format; -import std.conv; -void main(string[] args) { -  static heading_a               = ctRegex!(`^:?[A][~] `, "m"); -  static comment                 = ctRegex!(`^%+ `); -  static block_tic_code_open     = ctRegex!("^`{3} code(?:[.](?P<syntax>[a-z][0-9a-z#+_]+))?(?:[(](?P<attrib>[ a-zA-Z0-9;:,]*)[)])?"); -  static block_tic_close         = ctRegex!("^(`{3})$","m"); -  static 
block_curly_code_open   = ctRegex!(`^(?:code(?:[.](?P<syntax>[a-z][0-9a-z_]+))?(?:[(](?P<attrib>[ a-zA-Z0-9;:,]*)[)])?[{][ ]*$)`); -  static block_curly_code_close  = ctRegex!(`^([}]code)`); -  auto rgx_endnote_ref           = ctRegex!(`([~]\^)(?P<tail>[)\]]? |$)`, "gm"); -  auto rgx_endnote               = ctRegex!(`^\^~\s+(.+|\n)`, "gm"); -  char[][] header0Content1(in string src_text) { // cast(char[]) -    /+ split string on _first_ match of "^:?A~\s" into [header, content] array/tuple +/ -    char[][] header_and_content; -    auto m = (cast(char[]) src_text).matchFirst(heading_a); -    header_and_content ~= m.pre; -    header_and_content ~= m.hit ~ m.post; -    assert(header_and_content.length == 2, -      "document markup is broken, header body split == " -      ~ header_and_content.length.to!string -      ~ "; (header / body array split should == 2 (split is on level A~))" -    ); -    return header_and_content; -  } -  string format_body_string(string s) { -    string o; -    o = s -     .replaceAll(regex("^<(?:/[ ]*)?br>[ ]*"), " \\\\ ") -     .replaceAll(regex("[ ]*<(?:/[ ]*)?br>$"), " \\\\") -     .replaceAll(regex("[ ]*<(?:/[ ]*)?br>[ ]*"), " \\\\ "); -    return o; -  } -  string format_header_string(string s) { -    string o; -    o = s -     .replaceAll(regex("\""), "\\\"") -     .replaceAll(regex("[ ]*<(?:/[ ]*)?br>$"), " \\\\") -     .replaceAll(regex("[ ]*<(?:/[ ]*)?br>[ ]*"), " \\\\ "); -    return o; -  } -  string format_main_header(string hm, string hs = "", string c = "") { -    string o; -    if (c.length == 0) { -      o ~= hm ~ ":\n"; -    } else { -      o ~= hm ~ ":\n" -        ~ "  " ~ hs ~ ": " -        ~ "\"" ~ format_header_string(c) ~ "\"\n"; -    } -    return o; -  } -  string format_sub_header(string hs, string c) { -    string o; -    o ~= "  " ~ hs ~ ": " -      ~ "\"" ~ format_header_string(c) ~ "\"\n"; -    return o; -  } -  foreach(arg; args[1..$]) { -    if ( -      !(arg.match(regex(r"--\w+"))) -      && 
arg.match(regex(r"\w+?\.ss[itm]")) -    ) { -      writeln(arg); -      string filename                  = arg; -      try { -        string[] munged_header, munged_contents, munged_endnotes, endnote_refs; -        char[][] hc; -        char[] src_header; -        string[] headers; -        char[] src_txt; -        string[] paragraphs; -        enum codeBlock { off, curly, tic, } -        string _tmp_header; -        int endnote_ref_count          = 0; -        int code_block_status          = codeBlock.off; -        string text                    = filename.readText; -        if (arg.match(regex(r"\w+?\.ss[tm]"))) { -          hc                           = header0Content1(text); -          src_header                   = hc[0]; -          headers                      = src_header.to!string.split("\n\n"); -          src_txt                      = hc[1]; -          paragraphs                   = src_txt.to!string.split("\n\n"); -        } else if (arg.match(regex(r"\w+?\.ssi"))) { -          headers                      = []; -          paragraphs                   = text.split("\n\n"); -        } -        if (headers.length > 0) { -          headers[0] = headers[0].replaceFirst(regex(r"^%\s+SiSU.+", "i"), "# SiSU 8.0 spine (auto-conversion)"); -          foreach (h_; headers) { -            _tmp_header = ""; -            if (auto m = h_.match(regex(r"^%\s*", "m"))) { -              h_ = h_.replaceAll(regex(r"^%\s*", "m"), "# ") ~ "\n"; -            } -            if (h_.match(regex(r"^@title:|@subtitle"))) { -              if (auto m = h_.match(regex(r"^@(?P<h>title):(?:[ ]+(?P<c>.+)|\n)"))) { -                _tmp_header ~= format_main_header(m.captures["h"], "main", m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^@(?P<h>subtitle):(?:[ ]+(?P<c>.+)|$)"))) { -                if (m.captures["c"].length == 0) { -                } else { -                  _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -             
   } -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>main):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:sub(?:title)?:(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header("subtitle", m.captures["c"]); -              } -            } else if (h_.match(regex(r"^@creator:|@author:"))) { -              if (auto m = h_.match(regex(r"^(?:@creator:|@author:)(?:[ ]+(?P<c>.+)|\n)"))) { -                _tmp_header ~= format_main_header("creator", "author", m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>author):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -            } else if (h_.match(regex(r"^@rights:"))) { -              if (auto m = h_.match(regex(r"^@(?P<h>rights):(?:[ ]+(?P<c>.+)|\n)"))) { -                _tmp_header ~= format_main_header(m.captures["h"], "copyright", m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>copyright):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:licen[cs]e:(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header("license", m.captures["c"]); -              } -            } else if (h_.match(regex(r"^@date:|@date\."))) { -              if (auto m = h_.match(regex(r"^@(?P<h>date):(?:[ ]+(?P<c>.+)|\n)"))) { -                _tmp_header ~= format_main_header(m.captures["h"], "published", m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>published):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -          
    if (auto m = h_.match(regex(r"^\s+:(?P<h>available):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>modified):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>created):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>issued):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>valid):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^@date\.(?P<h>available):[ ]+(?P<c>.+)$"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^@date\.(?P<h>modified):[ ]+(?P<c>.+)$"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^@date\.(?P<h>created):[ ]+(?P<c>.+)$"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^@date\.(?P<h>issued):[ ]+(?P<c>.+)$"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^@date\.(?P<h>valid):[ ]+(?P<c>.+)$"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -            } else if (h_.match(regex(r"^@classify:"))) { -      
        if (auto m = h_.match(regex(r"^@classify:"))) { -                _tmp_header ~= "classify:\n"; -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>topic_register):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:type:(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= "#  type: " ~ "\"" ~ m.captures["c"] ~ "\"\n"; -              } -            } else if (h_.match(regex(r"^(?:@identifier:|@identify:)"))) { -              if (auto m = h_.match(regex(r"^(?:@identifier:|@idenfify)"))) { -                _tmp_header ~= "identify:\n"; -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>oclc):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>isbn):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>dewey):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -            } else if (h_.match(regex(r"^@publisher:"))) { -              if (auto m = h_.match(regex(r"^@publisher:[ ]+(?P<c>.+)$"))) { -                _tmp_header ~= "publisher: " ~  "\"" ~ m.captures["c"] ~ "\"\n"; -              } -            } else if (h_.match(regex(r"^@make:"))) { -              // writeln(h_); -              if (auto m = h_.match(regex(r"^@make:"))) { -                _tmp_header ~= "make:\n"; -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>breaks):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = 
h_.match(regex(r"^\s+:(?P<h>num_top):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>headings):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>italics):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>bold):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>emphasis):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>substitute):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>texpdf_font):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>home_button_text):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>home_button_image):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if (auto m = h_.match(regex(r"^\s+:(?P<h>cover_image):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              if 
(auto m = h_.match(regex(r"^\s+:(?P<h>footer):(?:[ ]+(?P<c>.+)|$)", "m"))) { -                _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]); -              } -              // writeln(_tmp_header); -            } else if (h_.match(regex(r"^@\w+:"))) { -              _tmp_header ~= "# " ~ h_.split("\n").join("\n# ") ~ "\n"; -            } else if (h_.match(regex(r"^\s+:\w+:", "m"))) { -              if (auto m = h_.match(regex(r"^(?P<g>\s+:\w+:.*)"))) { -                _tmp_header ~= "# " ~ m.captures["g"] ~ "\n"; -              } -            } -            if (h_.match(regex(r"^#", "m"))) { -              if (auto m = h_.match(regex(r"^(?P<g>#.*)", "m"))) { -                _tmp_header ~= m.captures["g"] ~ "\n"; -              } -             } -            if (_tmp_header.length > 0) { -              munged_header ~= _tmp_header.split("\n\n"); -            } else if (h_.length > 0) { -              writeln("munging required: ", h_); -              h_ = h_.replaceAll((regex(r"\n\n\n+", "m")), "\n\n"); -              munged_header ~= h_; -            } -          } -          // writeln(munged_header.join("\n")); -        } -        foreach (paragraph; paragraphs) {                                                                                  /+ loop to gather binary endnotes +/ -          if (code_block_status == codeBlock.off -            && paragraph.match(rgx_endnote) -          ) { -            munged_endnotes ~= replaceAll!(m => m[1]) -              (paragraph, rgx_endnote); -          } else { -            if ( code_block_status != codeBlock.off -              || paragraph.matchFirst(block_curly_code_open) -              || paragraph.matchFirst(block_tic_code_open) -            ) { /+ code blocks identified, no munging +/ -              if ((code_block_status == codeBlock.curly -                  && paragraph.matchFirst(block_curly_code_close)) -                || (code_block_status == codeBlock.tic -                  && 
paragraph.matchFirst(block_tic_close)) -              ) { -                code_block_status = codeBlock.off; -              } else if (paragraph.matchFirst(block_curly_code_open)) { -                code_block_status = codeBlock.curly; -              } else if (paragraph.matchFirst(block_tic_code_open)) { -                code_block_status = codeBlock.tic; -              } -              munged_contents ~= paragraph; -            } else { /+ regular content, not a code block +/ -              if (auto m = paragraph.matchAll(rgx_endnote_ref)) { -                foreach (n; m) { -                  endnote_ref_count++; // endnote_refs ~= (n.captures[1]); -                } -              } -              paragraph = format_body_string(paragraph); -              // paragraph = replaceAll!(m => " \\\\ " ) -              //   (paragraph, regex(r"\s*<(?:/\s*|:)?br>\s*")); // (paragraph, regex(r"(<br>)")); -              munged_contents ~= paragraph; -            } -          } -        } -        { -          import std.outbuffer; -          auto buffer = new OutBuffer(); -          if (munged_header.length > 0) { -            foreach (header; munged_header) { /+ loop to inline endnotes +/ -              buffer.write(header ~ "\n"); -            } -          } -          if (munged_endnotes.length == endnote_ref_count) { -            int endnote_count = -1; -            foreach (k, content; munged_contents) { /+ loop to inline endnotes +/ -              content = replaceAll!(m => "~{ " ~ munged_endnotes[++endnote_count] ~ " }~" ~ m["tail"] ) -                (content, rgx_endnote_ref); // endnote_ref cannot occur in a code block or else fail -              buffer.write(content ~ ((k == munged_contents.length - 1) ? 
"" : "\n\n")); -            } -            if (buffer) { -              try { -                string dir_out = "converted_output_"; -                string path_and_file_out = dir_out ~ "/" ~ filename; -                dir_out.mkdirRecurse; -                auto f = File(path_and_file_out, "w"); -                f.write(buffer); -                // writeln("wrote: ", path_and_file_out); -              } catch (FileException ex) { -                writeln("did not write file"); -                // Handle errors -              } -            } -          } else { -            foreach (content; munged_contents) { /+ loop to inline endnotes +/ -              buffer.write(content ~ "\n\n"); -            } -          } -        } -      } catch (ErrnoException ex) { -        switch(ex.errno) { -          case EPERM: -          case EACCES: // Permission denied -            break; -          case ENOENT: // File does not exist -            break; -          default:     // Handle other errors -            break; -        } -      } -    } -  } -} diff --git a/misc/util/d/tools/spine_scaffold.d b/misc/util/d/tools/spine_scaffold.d deleted file mode 100755 index dbcc857..0000000 --- a/misc/util/d/tools/spine_scaffold.d +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env rund -void main( string[] args ) { -  import std; -  if (args.length > 1) { -    string base_fn_path = args[1].expandTilde; -    string base_fn = base_fn_path.baseName; -    string sst_fn = base_fn ~ ".sst"; -    string txt_for_pod_manifest = format(q"┃doc: -  filename: %s -  language: en -┃", -      sst_fn -    ); -    string txt_for_sisu_document_make = format(q"┃ -┃", -    ); -    string txt_for_document_scaffold = format(q"┃# SiSU 8.0 - -title: "As Yet Unnamed" - -creator: -  author: "Annon, Unnamed" - -:A~ @title @author - -1~ Summary - -To get you started, the first paragraph following a section or chapter heading. 
- -Spine / SiSU documents minimum requirements: - -_* a header containing document metadata that must at least contain the fields Title and Creator Author. - -_* text body, identified as starting by the A~ marker at the start of a line, followed  by at least one level 1~ section heading with the text that follows it.~{ the document provided here would be a valid Spine document, and this text contained within the tilde and curly braces delimiters would be the first footnote/endnote }~ - -To generate this document to have html and epub output for example you would run: - -``` code -spine --html --epub --output=/tmp/spine-sample-output %s -``` - -1~ Conclusion - -This sample pod is provided to get you started. - -Good luck and good speed. -┃", -      base_fn_path -    ); -    if (!exists(base_fn_path)) { -      try { -        base_fn_path.mkdirRecurse; -      } catch (ErrnoException ex) { -        writeln(ex); -      } -      if (exists(args[1].expandTilde)) { -        try { -          base_fn_path.buildPath("conf").mkdirRecurse; -        } catch (ErrnoException ex) { -          writeln(ex); -        } -        try { -          base_fn_path.buildPath("media/text/en").mkdirRecurse; -        } catch (ErrnoException ex) { -          writeln(ex); -        } -        { -          // 1 // create/write pod.manifest -          string fn = base_fn_path.buildPath("pod.manifest"); -          File(fn, "w").writeln(txt_for_pod_manifest); -          string tell = format(q"┃OK - pod.manifest (yaml file containing filename and languages) -  %s -%s -┃", -            fn, -            txt_for_pod_manifest.strip -          ); -          writeln(tell); -        } -        if (exists(base_fn_path.buildPath("conf"))) { -          // 2 // create/write conf/sisu_document_make -          string fn = base_fn_path.buildPath("conf/sisu_document_make"); -          File(fn, "w").writeln(txt_for_sisu_document_make); -          // auto f = File(fn, "w"); -          // foreach (line; content_array) { 
-          //   f.writeln(line); -          // } -          string tell = format(q"┃OK - sisu_document_make -  %s -┃", -            fn -          ); -          writeln(tell); -        } -        if (exists(base_fn_path.buildPath("media/text/en"))) { -          // 3 // create/write media/text/[lang code]/[filename].sst -          string fn = base_fn_path.buildPath("media/text/en/" ~ sst_fn); -          File(fn, "w").writeln(txt_for_document_scaffold); -          // auto f = File(fn, "w"); -          // foreach (line; content_array) { -          //   f.writeln(line); -          // } -          string tell = format(q"┃OK - .sst [document text content] -  %s -  - To start editing document (spine pod content): -      ${EDITOR} %s -  - To generate this document to have html and epub output for example you would run: -      spine --html --epub --output=/tmp/spine-sample-output %s -┃", -            fn, -            fn, -            base_fn_path -          ); -          writeln(tell); -        } -      } -      /+ -        pod/[filename] -          │ -          ├── conf -          │   └── sisu_document_make -          ├── media -          │   └── text -          │       └── en -          │           └── [filename].charles_stross.sst -          └── pod.manifest -      +/ -    } else { -      writeln("requested output pod name with path already exists:\n  ", args[1].expandTilde); -    } -  } else { -    writeln( "please provide directory path to operate on, e.g.\n  spine_scaffold.d ./pod/filetest" ); -  } -} diff --git a/misc/util/rb/cgi/spine.search.cgi b/misc/util/rb/cgi/spine.search.cgi deleted file mode 100755 index ff2bfc2..0000000 --- a/misc/util/rb/cgi/spine.search.cgi +++ /dev/null @@ -1,952 +0,0 @@ -#!/usr/bin/env ruby -=begin - * Name: SiSU information Structuring Universe - * Author: Ralph Amissah -   * https://sisudoc.org -   * https://git.sisudoc.org - - * Description: generates naive cgi search form for search of sisu database (sqlite) - * Name: SiSU generated 
=begin
sample cgi search form

 * Description: generated sample cgi search form for SiSU
   (SiSU is a framework for document structuring, publishing and search)

 * Author: Ralph Amissah

 * Copyright: (C) 1997 - 2014, Ralph Amissah, All Rights Reserved.

 * License: GPL 3 or later:

   SiSU, a framework for document structuring, publishing and search

   Copyright (C) Ralph Amissah

   This program is free software: you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
   Software Foundation, either version 3 of the License, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   You should have received a copy of the GNU General Public License along with
   this program. If not, see <https://www.gnu.org/licenses/>.

   If you have Internet connection, the latest version of the GPL should be
   available at these locations:
   <https://www.fsf.org/licenses/gpl.html>
   <https://www.gnu.org/licenses/gpl.html>

 * SiSU uses:
   * Standard SiSU markup syntax,
   * Standard SiSU meta-markup syntax, and the
   * Standard SiSU object citation numbering and system

 * Homepages:
   <https://www.sisudoc.org>

 * Ralph Amissah
   <ralph@amissah.com>
   <ralph.amissah@gmail.com>

=end
# Load the CGI / FastCGI / SQLite stack; report rather than crash when a
# gem is missing so the failure is visible in the server log.
begin
  require 'cgi'
  require 'fcgi'
  require 'sqlite3'
rescue LoadError
  puts 'cgi, fcgi or sqlite3 NOT FOUND (LoadError)'
end
# Site-wide defaults shared by the request loop further down this file.
@stub_default            = 'search'
@image_src               = "https://#{ENV['HTTP_HOST']}/image_sys"
@hosturl_cgi             = "https://#{ENV['HTTP_HOST']}#{ENV['PATH_INFO']}"
@hosturl_files           = "https://#{ENV['HTTP_HOST']}"
@output_dir_structure_by = 'language'
@lingual                 = 'multi'
@db_name_prefix          = 'spine.'
@base                    = "https://#{ENV['HTTP_HOST']}#{ENV['PATH_INFO']}#{ENV['SCRIPT_NAME']}"
#Common TOP
@@offset                 = 0
@@canned_search_url      = @base
@color_heading           = '#DDFFAA'
@color_match             = '#ffff48'
# Renders the HTML search form. All "checked_*" arguments are either
# 'checked' or '' and are interpolated straight into the form controls.
class Form
  def initialize(base,search_field,selected_db,result_type,checked_sql_limit,checked_tip,checked_stats,checked_searched,checked_url,checked_case,checked_echo,checked_sql,checked_all,checked_none,checked_selected,checked_default,search_note,the_can='')
    # Suppress optional fragments the user has not asked to see.
    search_note  = '' if checked_searched !~ /\S/
    the_can      = '' if checked_url      !~ /\S/
    search_field = '' if checked_echo     !~ /\S/
    @base,@search_field,@selected_db,@result_type,@checked_sql_limit,@checked_tip,@checked_stats,@checked_searched,@checked_url,@checked_case,@checked_echo,@checked_sql,@checked_all,@checked_none,@checked_selected,@checked_default,@search_note,@the_can=base,search_field,selected_db,result_type,checked_sql_limit,checked_tip,checked_stats,checked_searched,checked_url,checked_case,checked_echo,checked_sql,checked_all,checked_none,checked_selected,checked_default,search_note,the_can
    # One-line reminder of every recognised "field:" query prefix.
    @tip = if checked_tip =~ /\S/
      '<font size="2" color="#666666">text:__; fulltxt:__; keywords:__; title:__; author:__; topic_register:__; subject:__; description:__; publisher:__; editor:__; contributor:__; date:__; type:__; format:__; identifier:__; source:__; language:__; relation:__; coverage:__; rights:__; comment:__; abstract:__; src_filename_base:__;</font><br />'
    else
      ''
    end
  end
  # Returns the complete HTML page (form plus band) as one string.
  def submission_form
    search_form =<<-WOK
    <!DOCTYPE html>
    <html>
    <head>
      <meta charset="utf-8">
      <meta name="sourcefile" content="SiSU._sst" />
      <title>SiSU search form (sample): SiSU information Structuring Universe</title>
      <link rel="generator" href="https://sisudoc.org/" />
      <link rel="shortcut icon" href="https://#{ENV['HTTP_HOST']}/_sisu/image_sys/rb7.ico" />
      <link href="../_sisu/css/html.css" rel="stylesheet">
    </head>
    <body lang="en" xml:lang="en">
    <table summary="band" border="0" cellpadding="3" cellspacing="0">
    <tr><td width="20%">
     <table summary="home button / home information" border="0" cellpadding="3" cellspacing="0">
     <tr><td align="left">
      <br /><a href="https://sisudoc.org/" target="_top">
        <b>SiSU</b>
      </a>
      <br /><a href="https://git.sisudoc.org/" target="_top">
        git
      </a>
     </td></tr>
     </table>
    </td>
    <td>
      <label for="find"><b>SiSU (generated sample) search form (content organised by filetype)</b></label>
    </td></tr>
    </table>
    <form action="#{@base}" id="Test Form" method="post">
      <table cellpadding="2">
      <tr><td valign=\"top\">
          <textarea id="find" name="find" type="text" rows="6" cols="40" maxlength="256">#{@search_field}</textarea>
      </td>
      <td valign=\"top\">
        #{@tip}
        #{@search_note}
        #{@the_can}
      </td></tr></table>
      <td valign=\"top\"><tr><td>
        <!input type="text" id="find" name="find" value="#{@search_field}" />
        <!input type="text" id="find" name="find" value="" />
        <font size="2" color="#222222">
        <b>to search:</b> select which database to search (drop-down menu below); enter your search query (in the form above); and <b>click on the search button</b> (below)
        <br />
        <select name="db" size="1">
          #{@selected_db}
              <option value="spine.sqlite">spine</option>
        </select>
        <input type="submit" value="SiSU search" />
        <input type="radio" name="view" value="index" #{@result_type[:index]}> index
        <input type="radio" name="view" value="text" #{@result_type[:text]}> text / grep
        <br />
          match limit:
          <input type="radio" name="sql_match_limit" value="1000" #{@checked_sql_limit[:l1000]}> 1,000
          <input type="radio" name="sql_match_limit" value="2500" #{@checked_sql_limit[:l2500]}> 2,500
        <br />
          <input type="checkbox" name="echo" #{@checked_echo}> echo query
          <input type="checkbox" name="stats" #{@checked_stats}> result stats
          <input type="checkbox" name="url" #{@checked_url}> search url
          <input type="checkbox" name="searched" #{@checked_searched}> searched
          <input type="checkbox" name="tip" #{@checked_tip}> available fields
          <input type="checkbox" name="sql" #{@checked_sql}> sql statement
        <br />
          checks:
          <input type="radio" name="checks" value="check_default" #{@checked_default}> default
          <input type="radio" name="checks" value="check_selected" #{@checked_selected}> selected
          <input type="radio" name="checks" value="check_all" #{@checked_all}> all
          <input type="radio" name="checks" value="check_none" #{@checked_none}> none
          </font>
      </td></tr>
      </table>
    </form>
    WOK
  end
end
# Parses a user query into per-field search terms. A query arrives either
# as one combined string ("title: x; author: y" in the textarea) or as
# individual CGI parameters (s1, ft, key, ti, au, ...) from a canned URL.
class SearchRequest                                                       #% search_for
  attr_accessor :text1,:fulltext,:keywords,:title,:author,:topic_register,:subject,:description,:publisher,:editor,:contributor,:date,:type,:format,:identifier,:source,:language,:relation,:coverage,:rights,:comment,:abstract,:owner,:date_created,:date_issued,:date_modified,:date_available,:date_valid,:src_filename_base
  def initialize(search_field='',q='')
    @search_field,@q=search_field,q
    # FIX: the filename term was stored in @filename, leaving the
    # :src_filename_base accessor (read throughout the request loop below)
    # permanently nil; it is now assigned to @src_filename_base.
    @text1=@fulltext=@keywords=@title=@author=@topic_register=@subject=@description=@publisher=@editor=@contributor=@date=@type=@format=@identifier=@source=@language=@relation=@coverage=@rights=@comment=@abstract=@owner=@date_created=@date_issued=@date_modified=@date_available=@date_valid=@src_filename_base=''
    if @search_field=~/\S/
      # Combined search string: extract each recognised "field:" prefix.
      @text1          = text_to_match('text:')
      @fulltext       = text_to_match('fulltxt:')
      @topic_register = text_to_match('topic_register:')
      @title          = text_to_match('title:')                # DublinCore 1  - title
      @author         = text_to_match('(?:author|creator)s?:') # DublinCore 2  - creator/author
      @subject        = text_to_match('subj(?:ect)?:')         # DublinCore 3  - subject
      @description    = text_to_match('description:')          # DublinCore 4  - description
      @publisher      = text_to_match('pub(?:lisher)?:')       # DublinCore 5  - publisher
      @editor         = text_to_match('editor:')
      @contributor    = text_to_match('contributor:')          # DublinCore 6  - contributor
      @date           = text_to_match('date:')                 # DublinCore 7  - date dd-mm-yy
      @type           = text_to_match('type:')                 # DublinCore 8  - type
      @format         = text_to_match('format:')               # DublinCore 9  - format
      @identifier     = text_to_match('identifier:')           # DublinCore 10 - identifier
      @source         = text_to_match('source:')               # DublinCore 11 - source
      @language       = text_to_match('language:')             # DublinCore 12 - language
      @relation       = text_to_match('relation:')             # DublinCore 13 - relation
      @coverage       = text_to_match('coverage:')             # DublinCore 14 - coverage
      @rights         = text_to_match('rights:')               # DublinCore 15 - rights
      @keywords       = text_to_match('key(?:words?)?:')
      @comment        = text_to_match('comment:')
      @abstract       = text_to_match('abs(?:tract)?:')
      @owner          = text_to_match('owner:')
      @date_created   = text_to_match('date_created:')
      @date_issued    = text_to_match('date_issued:')
      @date_modified  = text_to_match('date_modified:')
      @date_available = text_to_match('date_available:')
      @date_valid     = text_to_match('date_valid:')
      @src_filename_base = text_to_match('src_filename_base:')
      # No recognised field prefix at all: treat the whole input as text.
      @text1          = text_to_match unless @keywords or @author or @title or @text1 or @fulltext or @comment or @abstract or @rights or @subject or @publisher or @date or @src_filename_base or @topic_register
    else
      # Individual CGI parameters (canned search URL form).
      @text1          = q['s1']   if q['s1']   =~ /\S/
      @fulltext       = q['ft']   if q['ft']   =~ /\S/
      @keywords       = q['key']  if q['key']  =~ /\S/
      @title          = q['ti']   if q['ti']   =~ /\S/
      @author         = q['au']   if q['au']   =~ /\S/
      @topic_register = q['tr']   if q['tr']   =~ /\S/
      @subject        = q['sj']   if q['sj']   =~ /\S/
      @description    = q['dsc']  if q['dsc']  =~ /\S/
      @publisher      = q['pb']   if q['pb']   =~ /\S/
      @editor         = q['cntr'] if q['cntr'] =~ /\S/
      @contributor    = q['cntr'] if q['cntr'] =~ /\S/
      @date           = q['dt']   if q['dt']   =~ /\S/
      @type           = q['ty']   if q['ty']   =~ /\S/
      @identifier     = q['id']   if q['id']   =~ /\S/
      @source         = q['src']  if q['src']  =~ /\S/
      @language       = q['lang'] if q['lang'] =~ /\S/
      @relation       = q['rel']  if q['rel']  =~ /\S/
      @coverage       = q['cov']  if q['cov']  =~ /\S/
      @rights         = q['cr']   if q['cr']   =~ /\S/
      @comment        = q['co']   if q['co']   =~ /\S/
      @abstract       = q['ab']   if q['ab']   =~ /\S/
      @date_created   = q['dtc']  if q['dtc']  =~ /\S/
      @date_issued    = q['dti']  if q['dti']  =~ /\S/
      @date_modified  = q['dtm']  if q['dtm']  =~ /\S/
      @date_available = q['dta']  if q['dta']  =~ /\S/
      @date_valid     = q['dtv']  if q['dtv']  =~ /\S/
      @src_filename_base = if q['doc'] and q['search'] !~ /search db/ then q['doc']
        elsif                q['fns'] =~ /\S/                         then q['fns']
        end
      @@limit         = q['ltd']  if q['ltd']  =~ /\d+/ # 1000
      @@offset        = q['off']  if q['off']  =~ /\d+/ # 0
    end
  end
  # Extracts the value following one "identifier:" prefix in @search_field.
  # Returns the term with internal whitespace joined by '+', or nil when the
  # identifier is not present.
  def text_to_match(identifier='')
    # FIX: the original hash literal declared :string twice (the first
    # pattern was dead) and tested a non-existent m[:braces] entry, which
    # was always nil; both removed without behaviour change.
    m={
      string: /#{identifier}\s*(.+?)(?:;|\n|\r|$)/,
      word:   /#{identifier}[\s(]*(\S+)/
    }
    return nil unless @search_field =~ m[:word]
    search_string=if @search_field =~ m[:string]
      m[:string].match(@search_field)[1]
    else
      m[:word].match(@search_field)[1].gsub(/[()]/,'')
    end
    search_string.strip.gsub(/\s+/,'+')
  end
end
-class DBI_SearchString -  def initialize(l,t,q,cse=false) -    @l,@t,@q=l,t,q -  end -  def string -    search={ search: [], flag: false } -    if @t =~/\S+/ or @q =~/\S+/ -      if @t =~/\S+/    then unescaped_search=CGI.unescape(@t) -      elsif @q =~/\S+/ then unescaped_search=CGI.unescape(@q) -      end -      search_construct=[] -      unescaped_search=unescaped_search.gsub(/\s*(AND|OR)\s*/,"%' \) \\1 #{@l} LIKE \( '%"). -        gsub(/(.+)/,"#{@l} LIKE \( '%\\1%' \)") -      search_construct << unescaped_search -      search_construct=search_construct.join(' ') -      search[:search]                    << search_construct -      search[:flag]=true -      search -    end -    search -  end -end -class DBI_SearchStatement -  attr_reader :text_search_flag,:sql_select_body_format,:sql_offset,:sql_limit -  def initialize(conn,search_for,q,c) -    @conn=conn -    @text_search_flag=false -    @sql_statement={ body: '', endnotes: '', range: '' } -    #@offset||=@@offset -    #@offset+=@@limit -    search={ text: [], endnotes: [] } -    cse=(c =~/\S/) ? 
true : false -    st=DBI_SearchString.new('doc_objects.clean',search_for.text1,q['s1'],cse).string -    se=DBI_SearchString.new('endnotes.clean',search_for.text1,q['s1'],cse).string -    @text_search_flag=st[:flag] -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.fulltext',search_for.fulltext,q['ft'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.title',search_for.title,q['ti'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.creator_author',search_for.author,q['au'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.classify_topic_register',search_for.topic_register,q['tr'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.classify_subject',search_for.subject,q['sj'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.classify_keywords',search_for.keywords,q['key'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.notes_description',search_for.description,q['dsc'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.publisher',search_for.publisher,q['pb'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.creator_editor',search_for.editor,q['cntr'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st 
= DBI_SearchString.new('metadata_and_text.creator_contributor',search_for.contributor,q['cntr'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.date_published',search_for.date,q['dt'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.notes_type',search_for.type,q['ty'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.original_source',search_for.source,q['src'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.language_document_char',search_for.language,q['lang'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.notes_relation',search_for.relation,q['rel'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.notes_coverage',search_for.coverage,q['cov'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.rights_all',search_for.rights,q['cr'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.notes_comment',search_for.comment,q['co'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.notes_abstract',search_for.abstract,q['ab'],cse).string -    if st[:flag] -      search[:text]                    << st[:search] -    end -    st = DBI_SearchString.new('metadata_and_text.src_filename_base',search_for.src_filename_base,q['fns'],cse).string -    
if st[:flag] -      search[:text]                    << st[:search] -    end -    @@limit=q['ltd'] if q['ltd']=~/\d+/  # 1000 -    @@offset=q['off'] if q['off']=~/\d+/ # 0 -    @search_text='' -    @search_text=search[:text].flatten.join(' AND ') -    @search_text=@search_text.gsub(/(doc_objects\.clean\s+LIKE\s+\(\s*'%[^']+%'\s*\)\s+(?:(?:AND|OR)\s+doc_objects\.clean\s+LIKE\s+\(\s*'%[^']+%'\s*\))+)/,'(\1)') -  end -  def sql_offset -    @@offset -  end -  def sql_match_limit -    @@limit -  end -  def sql_canned_search -    @offset_next=sql_offset.to_i + sql_match_limit.to_i -    @offset_previous=sql_offset.to_i - sql_match_limit.to_i -    def current -      @@canned_search_url.to_s + '<d=' + sql_match_limit.to_s + '&off=' + sql_offset.to_s -    end -    def next -      @@canned_search_url.to_s + '<d=' + sql_match_limit.to_s + '&off=' + @offset_next.to_s -    end -    def previous -      @offset_previous >= 0 \ -      ? (@@canned_search_url.to_s + '<d=' + sql_match_limit.to_s + '&off=' + @offset_previous.to_s) -      : '' -    end -    def start -      @@canned_search_url.to_s + '<d=' + sql_match_limit.to_s + '&off=' + 0.to_s -    end -    self -  end -  def pre_next(beyond_limit,img) -    can=sql_canned_search -    page=(sql_offset.to_i + sql_match_limit.to_i)/sql_match_limit.to_i -    if beyond_limit -      if page.to_s =~ /^1$/ -        %{<br /><center> -        pg. #{page.to_s} -        <a href="#{can.next}"> -          <img border="0" width="22" height="22" src="#{img}/arrow_next_red.png" alt=" >>" /> -        </a> -        </center>} -      elsif page.to_s =~ /^2$/ -        %{<br /><center> -        <a href="#{can.previous}"> -          <img border="0" width="22" height="22" src="#{img}/arrow_prev_red.png" alt="<< " /> -        </a> -        pg. 
#{page.to_s} -        <a href="#{can.next}"> -          <img border="0" width="22" height="22" src="#{img}/arrow_next_red.png" alt=" >>" /> -        </a> -        </center>} -      else -        %{<br /><center> -        <a href="#{can.start}"> -          <img border="0" width="22" height="22" src="#{img}/arrow_prev_red.png" alt="|< " /> -        </a> -        <a href="#{can.previous}"> -          <img border="0" width="22" height="22" src="#{img}/arrow_prev_red.png" alt="<< " /> -        </a> -        pg. #{page.to_s} -        <a href="#{can.next}"> -          <img border="0" width="22" height="22" src="#{img}/arrow_next_red.png" alt=" >>" /> -        </a> -        </center>} -      end -    else -      if page.to_s =~ /^1$/ then '' -      elsif page.to_s =~ /^2$/ -        %{<br /><center> -        <a href="#{can.previous}"> -          <img border="0" width="22" height="22" src="#{img}/arrow_prev_red.png" alt="<< " /> -        </a> -        pg. #{page.to_s} -        </center>} -      else -        %{<br /><center> -        <a href="#{can.start}"> -          <img border="0" width="22" height="22" src="#{img}/arrow_prev_red.png" alt="|< " /> -        </a> -        <a href="#{can.previous}"> -          <img border="0" width="22" height="22" src="#{img}/arrow_prev_red.png" alt="<< " /> -        </a> -        pg. 
#{page.to_s} -        </center>} -      end -    end -  end -  def sql_select_body -    limit  ||= @@limit -    offset ||= @@offset -    @sql_statement[:body]  = %{ -      SELECT metadata_and_text.title, metadata_and_text.creator_author, metadata_and_text.src_filename_base, metadata_and_text.language_document_char, metadata_and_text.notes_suffix, doc_objects.body, doc_objects.seg_name, doc_objects.ocn, metadata_and_text.uid -      FROM doc_objects, metadata_and_text -      WHERE #{@search_text} AND doc_objects.uid_metadata_and_text = metadata_and_text.uid -      ORDER BY metadata_and_text.language_document_char, metadata_and_text.title, metadata_and_text.src_filename_base, doc_objects.ocn -    } -    @sql_statement[:range] = %{LIMIT #{limit} OFFSET #{offset} ;} -    select                 = @sql_statement[:body] + ' ' + @sql_statement[:range] -    select -  end -  def sql_select_body_format -    %{<font color="#666666" size="2">#{sql_select_body}</font>} -  end -  def contents -    @conn.execute(sql_select_body) -  end -end -def tail -  <<-'WOK' -    <br /><hr /><br /> -<table summary="SiSU summary" cellpadding="2" border="0"> -  <!-- widget sisu --> -<tr><td valign="top" width="10%"> - <table summary="home button / home information" border="0" cellpadding="3" cellspacing="0"> - <tr><td align="left"> -  <br /><a href="https://sisudoc.org/" target="_top"> -    <b>SiSU</b> -  </a> -  <br /><a href="https://git.sisudoc.org/" target="_top"> -    git -  </a> - </td></tr> - </table> -</td> -<td valign="top" width="45%"> -<!-- SiSU Rights --> -  <p class="tiny_left"><font color="#666666" size="2"> -    Generated by -      SiSU 6.3.1 2014-10-19 (2014w41/7) -    <br /> -    <a href="https://www.sisudoc.org" > -    <b>SiSU</b></a> <sup>©</sup> Ralph Amissah -    1993, current 2014. -    All Rights Reserved. 
-    <br /> -      SiSU is software for document structuring, publishing and search, -    <br /> -      <a href="https://www.sisudoc.org" > -      www.sisudoc.org -    </a> -    sources -    <a href="https://git.sisudoc.org" > -      git.sisudoc.org -    </a> -  <br /> -    <i>w3 since October 3 1993</i> -    <a href="mailto:ralph@amissah.com" > -      ralph@amissah.com -    </a> -  <br /> -    mailing list subscription -    <a href="https://lists.sisudoc.org/listinfo/sisu" > -      https://lists.sisudoc.org/listinfo/sisu -    </a> -  <br /> -    <a href="mailto:sisu@lists.sisudoc.org" > -      sisu@lists.sisudoc.org -    </a> -  </font></p> -</td><td valign="top" width="45%"> -  <p class="tiny_left"><font color="#666666" size="2"> -    SiSU using: -    <br />Standard SiSU markup syntax, -    <br />Standard SiSU meta-markup syntax, and the -    <br />Standard SiSU <u>object citation numbering</u> and system, (object/text identifying/locating system) -  <br /> -    <sup>©</sup> Ralph Amissah 1997, current 2014. -    All Rights Reserved. -  </font></p> -</td></tr> -  <!-- widget way better --> -<tr><td valign="top" width="10%"> -  <p class="tiny_left"><font color="#666666" size="2"> -    <a href="https://www.gnu.org/licenses/gpl.html"> -      .: -    </a> -  </font></p> -</td><td valign="top" width="45%"> -  <p class="tiny_left"><font color="#666666" size="2"> -    SiSU is released under -    <a href="https://www.gnu.org/licenses/gpl.html">GPL v3</a> -    or later, -    <a href="https://www.gnu.org/licenses/gpl.html"> -      https://www.gnu.org/licenses/gpl.html -    </a> -  </font></p> -</td><td valign="top" width="45%"> -  <p class="tiny_left"><font color="#666666" size="2"> -    SiSU, developed using -    <a href="https://www.ruby-lang.org/en/"> -      Ruby -    </a> -    on -    <a href="https://www.debian.org/"> -      Debian/Gnu/Linux -    </a> -    software infrastructure, -    with the usual GPL (or OSS) suspects. 
-  </font></p> -</td></tr> -</table> -    <a name="bottom" id="bottom"></a><a name="down" id="down"></a><a name="end" id="end"></a><a name="finish" id="finish"></a><a name="stop" id="stop"></a><a name="credits" id="credits"></a> -    </body></html> -  WOK -end -@tail=tail -@counter_txt_doc,@counter_txt_ocn,@counter_endn_doc,@counter_endn_ocn=0,0,0,0 -@counters_txt,@counters_endn,@sql_select_body='','','' -FCGI.each_cgi do |cgi| -  begin # all code goes in begin section -    @search={ text: [], endnotes: [] } -    q=CGI.new -    @db=if cgi['db'] =~ /#{@db_name_prefix}(\S+)/ -      @stub=$1 -      cgi['db'] -    else -      @stub=@stub_default -      @db_name_prefix + @stub -    end -    checked_url,checked_stats,checked_searched,checked_tip,checked_case,checked_echo,checked_sql,checked_all,checked_none,checked_selected,checked_default,selected_db='','','','','','','','','' -    result_type=(cgi['view']=~/text/) \ -    ? result_type={ index: '', text: 'checked'} -    : result_type={ index: 'checked', text: ''} -    @@limit=if cgi['sql_match_limit'].to_s=~/2500/ -      checked_sql_limit={ l1000: '', l2500: 'checked'} -      '2500' -    else -      checked_sql_limit={ l1000: 'checked', l2500: ''} -      '1000' -    end -    checked_echo     =  'checked' if cgi['echo'] =~/\S/ -    checked_stats    =  'checked' if cgi['stats'] =~/\S/ -    checked_url      =  'checked' if cgi['url'] =~/\S/ or cgi['u'].to_i==1 -    checked_searched =  'checked' if cgi['searched'] =~/\S/ -    checked_tip      =  'checked' if cgi['tip'] =~/\S/ -    checked_case     =  'checked' if cgi['casesense'] =~/\S/ -    checked_sql      =  'checked' if cgi['sql'] =~/\S/ -    if cgi['checks'] =~ /check_all/ or cgi['check_all'] =~/\S/ or cgi['a'].to_i==1 -      checked_all    =  'checked' -      checked_echo=checked_stats=checked_url=checked_searched=checked_tip=checked_sql='checked' -      checked_none   ='' -    elsif cgi['checks'] =~ /check_none/ -      checked_none = 'checked' -      
checked_all=checked_url=checked_stats=checked_searched=checked_tip=checked_echo=checked_sql='' -    elsif cgi['checks'] =~ /check_selected/ -      checked_selected = 'checked' -    elsif cgi['checks'] =~ /check_default/ -      checked_default = 'checked' -      checked_echo=checked_stats=checked_url='checked' -      checked_searched=checked_tip=checked_case=checked_sql='' -    else -      checked_selected='checked' -      checked_echo=checked_stats=checked_url='checked' -      checked_searched=checked_tip=checked_case=checked_sql='' -    end -    selected_db=case cgi['db'] -     when /spine.sqlite/ then '<option value="spine.sqlite">spine</option>' -    end -    db_name='spine.search.sql.db' -    #db_name='spine.sqlite.db' -    #db_name='sisu_sqlite.db' -    db_sqlite=case cgi['db'] -    when /spine.sqlite/ then "/srv/complete.sisudoc.org/web/manual/#{db_name}" -     else  "/var/www/sqlite/#{db_name}" -    end -    #when /spine.sqlite/ then "/srv/complete.sisudoc.org/web/manual/#{db_name}" -     #else  "/srv/complete.sisudoc.org/web/manual/#{db_name}" -    #end -    #@conn=SQLite3::Database.new(db_sqlite) -    @conn=SQLite3::Database.new("/var/www/sqlite/spine.search.sql.db") -    #@conn=SQLite3::Database.new("/var/www/spine.sqlite.db") -    @conn.results_as_hash=true -    search_field=cgi['find'] if cgi['find'] # =~/\S+/ -    @search_for=SearchRequest.new(search_field,q) #.analyze               #% search_for -                                                                           #% searches -    #Canned_search.new(@base,@search_for.text1,cgi) -    if @search_for.text1=~/\S+/ or @search_for.fulltext=~/\S+/ or @search_for.author=~/\S+/ or @search_for.topic_register=~/\S+/  #and search_field =~/\S/ -      s1   = 's1='    + CGI.escape(@search_for.text1)          if @search_for.text1          =~ /\S/ -      ft   = '&ft='   + CGI.escape(@search_for.fulltext)       if @search_for.fulltext       =~ /\S/ -      key  = 'key='   + CGI.escape(@search_for.keywords)       
if @search_for.keywords       =~ /\S/ -      ti   = '&ti='   + CGI.escape(@search_for.title)          if @search_for.title          =~ /\S/ -      au   = '&au='   + CGI.escape(@search_for.author)         if @search_for.author         =~ /\S/ -      tr   = '&tr='   + CGI.escape(@search_for.topic_register) if @search_for.topic_register =~ /\S/ -      sj   = '&sj='   + CGI.escape(@search_for.subject)        if @search_for.subject        =~ /\S/ -      dsc  = '&dsc='  + CGI.escape(@search_for.description)    if @search_for.description    =~ /\S/ -      pb   = '&pb='   + CGI.escape(@search_for.publisher)      if @search_for.publisher      =~ /\S/ -      edt  = '&edt='  + CGI.escape(@search_for.editor)         if @search_for.editor         =~ /\S/ -      cntr = '&cntr=' + CGI.escape(@search_for.contributor)    if @search_for.contributor    =~ /\S/ -      dt   = '&dt='   + CGI.escape(@search_for.date)           if @search_for.date           =~ /\S/ -      ty   = '&ty='   + CGI.escape(@search_for.type)           if @search_for.type           =~ /\S/ -      id   = '&id='   + CGI.escape(@search_for.identifier)     if @search_for.identifier     =~ /\S/ -      src  = '&src='  + CGI.escape(@search_for.source)         if @search_for.source         =~ /\S/ -      lang = '&lang=' + CGI.escape(@search_for.language)       if @search_for.language       =~ /\S/ -      rel  = '&rel='  + CGI.escape(@search_for.relation)       if @search_for.relation       =~ /\S/ -      cov  = '&cov='  + CGI.escape(@search_for.coverage)       if @search_for.coverage       =~ /\S/ -      cr   = '&cr='   + CGI.escape(@search_for.rights)         if @search_for.rights         =~ /\S/ -      co   = '&co='   + CGI.escape(@search_for.comment)        if @search_for.comment        =~ /\S/ -      ab   = '&ab='   + CGI.escape(@search_for.abstract)       if @search_for.abstract       =~ /\S/ -      dtc  = '&dtc='  + CGI.escape(@search_for.date_created)   if @search_for.date_created   =~ /\S/ -      dti  = '&dti='  
+ CGI.escape(@search_for.date_issued)    if @search_for.date_issued    =~ /\S/ -      dtm  = '&dtm='  + CGI.escape(@search_for.date_modified)  if @search_for.date_modified  =~ /\S/ -      dta  = '&dta='  + CGI.escape(@search_for.date_available) if @search_for.date_available =~ /\S/ -      dtv  = '&dtv='  + CGI.escape(@search_for.date_valid)     if @search_for.date_valid     =~ /\S/ -      fns  = '&fns='  + CGI.escape(@search_for.src_filename_base)   if @search_for.src_filename_base       =~ /\S/ -      @@canned_search_url=(checked_all =~/checked/) \ -      ? "#{@base}?#{s1}#{ft}#{key}#{ti}#{au}#{tr}#{sj}#{dsc}#{pb}#{edt}#{cntr}#{dt}#{ty}#{id}#{src}#{lang}#{rel}#{cov}#{cr}#{co}#{ab}#{dtc}#{dti}#{dtm}#{dta}#{dtv}#{fns}&db=#{cgi['db']}&view=#{cgi['view']}&a=1" -      : "#{@base}?#{s1}#{ft}#{key}#{ti}#{au}#{tr}#{sj}#{dsc}#{pb}#{edt}#{cntr}#{dt}#{ty}#{id}#{src}#{lang}#{rel}#{cov}#{cr}#{co}#{ab}#{dtc}#{dti}#{dtm}#{dta}#{dtv}#{fns}&db=#{cgi['db']}&view=#{cgi['view']}" -      mod=ft=~/\S+/ ? (ft.gsub(/ft/,'s1')) : s1 -      @canned_base_url="#{@base}?#{mod}&db=#{cgi['db']}" -      if checked_case=~/\S/ -        @search[:text][1]=%{doc_objects.clean~'#{@search_for.text1}'} #s1 -      else -        @search[:text][1]=%{doc_objects.clean~*'#{@search_for.text1}'} #s1 -      end -      canned_note='search url:' -    else -      @@canned_search_url="#{@base}?db=#{@db}&view=index" -      canned_note='search url example:' -    end -    if search_field =~/\S+/ -      analyze_format=search_field.gsub(/\s*\n/,'; ') -    elsif checked_all =~/checked/ or checked_url =~/checked/ -      canned_search=@@canned_search_url.scan(/(?:s1|ft|au|ti|fns|tr)=[^&]+/) -      af=canned_search.join('; ') -      af=af.gsub(/s1=/,'text: '). -        gsub(/ft=/,'fulltxt: '). -        gsub(/au=/,'author: '). -        gsub(/ti=/,'title: '). -        gsub(/fns=/,'src_filename_base: '). -        gsub(/tr=/,'topic_register: '). 
-        gsub(/%2B/,' ') -      analyze_format=af -      st=af.split(/\s*;\s*/) -      search_field=st.join("\n") -    end -    green=%{<font size="2" color="#004000">} -    canned_search_url_txt=CGI.escapeHTML(@@canned_search_url) -    the_can=%{<font size="2" color="#666666">#{canned_note} <a href="#{@@canned_search_url}">#{canned_search_url_txt}</a></font><br />} -    p_text=p_fulltext=p_keywords=p_title=p_author=p_topic_register=p_subject=p_description=p_publisher=p_editor=p_contributor=p_date=p_type=p_format=p_identifier=p_source=p_language=p_relation=p_coverage=p_rights=p_comment=p_abstract=p_filename='' -    p_filename       = %{src_filename_base: #{green}#{@search_for.src_filename_base}</font><br />}             if @search_for.src_filename_base       =~ /\S+/ -    p_text           = %{text: #{green}#{@search_for.text1}</font><br />}                    if @search_for.text1          =~ /\S+/ -    p_fulltext       = %{fulltxt: #{green}#{@search_for.fulltext}</font><br />}              if @search_for.fulltext       =~ /\S+/ -    p_title          = %{title: #{green}#{@search_for.title}</font><br />}                   if @search_for.title          =~ /\S+/ -    p_author         = %{author: #{green}#{@search_for.author}</font><br />}                 if @search_for.author         =~ /\S+/ -    p_editor         = %{editor: #{green}#{@search_for.editor}</font><br />}                 if @search_for.editor         =~ /\S+/ -    p_contributor    = %{contributor: #{green}#{@search_for.contributor}</font><br />}       if @search_for.contributor    =~ /\S+/ -    p_date           = %{date: #{green}#{@search_for.date}</font><br />}                     if @search_for.date           =~ /\S+/ -    p_rights         = %{rights: #{green}#{@search_for.rights}</font><br />}                 if @search_for.rights         =~ /\S+/ -    p_topic_register = %{topic_register: #{green}#{@search_for.topic_register}</font><br />} if @search_for.topic_register =~ /\S+/ -    p_subject        = 
%{subject: #{green}#{@search_for.subject}</font><br />}               if @search_for.subject        =~ /\S+/ -    p_keywords       = %{keywords: #{green}#{@search_for.keywords}</font><br />}             if @search_for.keywords       =~ /\S+/ -    p_identifier     = %{identifier: #{green}#{@search_for.identifier}</font><br />}         if @search_for.identifier     =~ /\S+/ -    p_type           = %{type: #{green}#{@search_for.type}</font><br />}                     if @search_for.type           =~ /\S+/ -    p_format         = %{format: #{green}#{@search_for.format}</font><br />}                 if @search_for.format         =~ /\S+/ -    p_relation       = %{relation: #{green}#{@search_for.relation}</font><br />}             if @search_for.relation       =~ /\S+/ -    p_coverage       = %{coverage: #{green}#{@search_for.coverage}</font><br />}             if @search_for.coverage       =~ /\S+/ -    p_description    = %{description: #{green}#{@search_for.description}</font><br />}       if @search_for.description    =~ /\S+/ -    p_abstract       = %{abstract: #{green}#{@search_for.abstract}</font><br />}             if @search_for.abstract       =~ /\S+/ -    p_comment        = %{comment: #{green}#{@search_for.comment}</font><br />}               if @search_for.comment        =~ /\S+/ -    p_publisher      = %{publisher: #{green}#{@search_for.publisher}</font><br />}           if @search_for.publisher      =~ /\S+/ -    p_source         = %{source: #{green}#{@search_for.source}</font><br />}                 if @search_for.source         =~ /\S+/ -    p_language       = %{language: #{green}#{@search_for.language}</font><br />}             if @search_for.language       =~ /\S+/ -    search_note=<<-WOK -      <font size="2" color="#666666"> -      <b>database:</b> #{green}#{@db}</font>; <b>selected view:</b> #{green}#{cgi['view']}</font> -      <b>search string:</b> "#{green}#{analyze_format}</font>"<br /> -      #{p_text} #{p_fulltext} #{p_keywords} #{p_title} 
#{p_author} #{p_topic_register} #{p_subject} #{p_description} #{p_publisher} #{p_editor} #{p_contributor} #{p_date} #{p_type} #{p_format} #{p_identifier} #{p_source} #{p_language} #{p_relation} #{p_coverage} #{p_rights} #{p_comment} #{p_abstract} #{p_filename} -      </font> -      WOK -  #eg = %{canned search e.g.:<br /> <a href="#{url}">#{url}</a><br />find: #{analyze}<br />database: #{database}} -  #% dbi_canning -  @header = Form.new(@base,search_field,selected_db,result_type,checked_sql_limit,checked_tip,checked_stats,checked_searched,checked_url,checked_case,checked_echo,checked_sql,checked_all,checked_none,checked_selected,checked_default,search_note,the_can).submission_form #% form -  unless q['s1'] =~/\S/ or q['au'] =~/\S/ or @search[:text][1] =~/\S/ -    print "Content-type: text/html\n\n" -    puts (@header+@tail) -  else #% searches -    s1=(@search_for.text1 =~/\S/) \ -    ? @search_for.text1 -    : 'Unavailable' -    if checked_case=~/\S/ -      @search[:text]<<%{doc_objects.clean~'#{CGI.unescape(s1)}'} -    else -      @search[:text]<<%{doc_objects.clean~*'#{CGI.unescape(s1)}'} -    end -    #% dbi_request -    dbi_statement=DBI_SearchStatement.new(@conn,@search_for,q,checked_case) -    @text_search_flag=false -    @text_search_flag=dbi_statement.text_search_flag -    s_contents=dbi_statement.contents -    @body_main='' -    @search_regx=nil -    olduid="" -    if @text_search_flag -      if checked_sql =~/\S/ -        sql_select_body=dbi_statement.sql_select_body_format -      else sql_select_body='' -      end -      @body_main << sql_select_body -      #@body_main << '<p><hr><br /><b>Main Text:</b><br />' << sql_select_body -    else -    end -    @hostpath = "#{@hosturl_files}" -    #@hostpath="#{@hosturl_files}/#{@stub}" -    def path_manifest(fn,ln=nil) -      case @output_dir_structure_by -      when 'filename' -        @lingual =='mono' \ -        ? 
"#{@hostpath}/#{fn}/sisu_manifest.html" -        : "#{@hostpath}/#{fn}/sisu_manifest.#{ln}.html" -      when 'filetype' -        @lingual =='mono' \ -        ? "#{@hostpath}/manifest/#{fn}.html" -        : "#{@hostpath}/manifest/#{fn}.#{ln}.html" -      else -        "#{@hostpath}/#{ln}/manifest/#{fn}.html" -      end -    end -    def path_html_seg(fn,ln=nil) -      case @output_dir_structure_by -      when 'filename' -        "#{@hostpath}/#{fn}" -      when 'filetype' -        "#{@hostpath}/html/#{fn}" -      else -        "#{@hostpath}/#{ln}/html/#{fn}" -      end -    end -    def path_toc(fn,ln=nil) -      if @output_dir_structure_by =='filename' \ -      or @output_dir_structure_by =='filetype' -        @lingual =='mono' \ -        ? "#{path_html_seg(fn,ln)}/toc.html" -        : "#{path_html_seg(fn,ln)}/toc.#{ln}.html" -      else -        "#{path_html_seg(fn,ln)}/toc.html" -      end -    end -    def path_filename(fn,seg_name,ln=nil) -      if @output_dir_structure_by =='filename' \ -      or @output_dir_structure_by =='filetype' -        @lingual =='mono' \ -        ? "#{path_html_seg(fn,ln)}/#{seg_name}.html" -        : "#{path_html_seg(fn,ln)}/#{seg_name}.#{ln}.html" -      else -        "#{path_html_seg(fn,ln)}/#{seg_name}.html" -      end -    end -    def path_html_doc(fn,ln=nil) -      case @output_dir_structure_by -      when 'filename' -        @lingual =='mono' \ -        ? "#{path_html_seg(fn,ln)}/scroll.html" -        : "#{path_html_seg(fn,ln)}/scroll.#{ln}.html" -      when 'filetype' -        @lingual =='mono' \ -        ? 
"#{@hostpath}/html/#{fn}.html" -        : "#{@hostpath}/html/#{fn}.#{ln}.html" -      else -        "#{@hostpath}/#{ln}/html/#{fn}.html" -      end -    end -              #% text_objects_body -    s_contents.each do |c|                                               #% text body -      location=c['src_filename_base'][/(.+?)\.(?:ssm\.sst|sst)$/,1] -      file_suffix=c['src_filename_base'][/.+?\.(ssm\.sst|sst)$/,1] -      lang=if location =~ /\S+?~(\S\S\S?)$/ -        l=location[/\S+?~(\S\S\S?)$/,1] -        location=location.gsub(/(\S+?)~\S\S\S?/,'\1') -        l=".#{l}" -      else '' -      end -    #% metadata_found_body -      if c['uid'] != olduid -        ti=c['title'] -        can_txt_srch=(cgi['view']=~/index/) \ -        ? %{<a href="#{@canned_base_url}&fns=#{c['src_filename_base']}&lang=#{c['language_document_char']}&view=text"><img border="0" width="24" height="16" src="#{@image_src}/b_search.png" alt="search"></a> } -        : %{<a href="#{@canned_base_url}&fns=#{c['src_filename_base']}&lang=#{c['language_document_char']}&view=index"><img border="0" width="24" height="16" src="#{@image_src}/b_search.png" alt="search"></a> } -        title = %{<span style="background-color: #{@color_heading}"><a href="#{path_toc(location,c['language_document_char'])}"><img border="0" width="15" height="18" src="#{@image_src}/b_toc.png" alt=""> #{ti}</a></span> [#{c['language_document_char']}] by #{c['creator_author']} <a href="#{path_manifest(location,c['language_document_char'])}"><img border="0" width="15" height="15" src="#{@image_src}/b_info.png" alt=""></a> #{can_txt_srch}<br />} -        title=@text_search_flag \ -        ? 
'<br /><hr>'+title -        : '<br />'+title -        @counter_txt_doc+=1 -        olduid=c['uid'] -      else                    title='' -      end -      if @text_search_flag -        if cgi['view']=~/text/ \ -        or (cgi['view']!~/index/ and cgi['search'] !~/search db/)      #% txt body -          text=if c['suffix'] !~/1/ #seg -            if @search_for.text1 =~/\S+/ \ -            or q['s1'] =~/\S+/                         #% only this branch is working !! -              unescaped_search=if @search_for.text1 =~/\S+/ -                CGI.unescape(@search_for.text1) -              elsif q['s1'] =~/\S+/ -                CGI.unescape(q['s1']) -              else nil -              end -              @search_regx=if unescaped_search                                     #check -                search_regex=[] -                build=unescaped_search.scan(/\S+/).each do |g| -                   (g.to_s =~/(AND|OR)/) \ -                   ? (search_regex << '|') -                   : (search_regex << %{#{g.to_s}}) -                end -                search_regex=search_regex.join(' ') -                search_regex=search_regex.gsub(/\s*\|\s*/,'|') -                Regexp.new(search_regex, Regexp::IGNORECASE) -              else nil -              end -            else nil -            end -            matched_para=(@search_regx.to_s.class==String && @search_regx.to_s=~/\S\S+/) \ -            ? 
(c['body'].gsub(/(<a\s+href="https?:\/\/[^><\s]+#{@search_regx}[^>]+?>|#{@search_regx})/mi,%{<span style="background-color: #{@color_match}">\\1</span>})) -            : c['body'] -            %{<hr><p><font size="2">ocn <b><a href="#{path_filename(location,c['seg_name'],c['language_document_char'])}##{c['ocn']}">#{c['ocn']}</a></b>:</font></p>#{matched_para}} -          elsif c['suffix'] =~/1/ #doc -            %{#{title}<hr><p><font size="2">ocn #{c['ocn']}:#{c['body']}} -          end -          @counter_txt_ocn+=1 -          output=title+text -        else #elsif cgi['view']=~/index/                                #% idx body -          if c['suffix'] !~/1/ #seg -            index=%{<a href="#{path_filename(location,c['seg_name'],c['language_document_char'])}##{c['ocn']}">#{c['ocn']}</a>, } if @text_search_flag -          elsif c['suffix'] =~/1/ #doc #FIX -            index=%{<a href="#{path_html_doc(location,c['language_document_char'])}##{c['ocn']}">#{c['ocn']}</a>, } -          end -          if c['seg_name'] =~/\S+/ -            if @text_search_flag -              @counter_txt_ocn+=1 -              output=title+index -            end -          else -            @counter_txt_ocn+=1 -            output=c['suffix'] !~/1/ \ -            ? title+index -            : %{#{title}#{c['ocn'].sort}, } -          end -        end -      else output=title -      end -      @counters_txt=if @counter_txt_doc > 0 -        if checked_stats =~/\S/ -          @@lt_t=(@counter_txt_ocn==dbi_statement.sql_match_limit.to_i) ? 
true : false -          start=(@@offset.to_i+1).to_s -          range=(@@offset.to_i+@counter_txt_ocn.to_i).to_s -          %{<hr /><font size="2" color="#666666">Found #{@counter_txt_ocn} times in the main body of #{@counter_txt_doc} documents [ matches #{start} to #{range} ]</font><br />} -        else '' -        end -      else '' -      end -      @body_main << output #+ details -    end -    olduid = "" -    offset=dbi_statement.sql_offset.to_s -    limit=dbi_statement.sql_match_limit.to_s -    @@lt_t ||=false; @@lt_e ||=false -    canned=(@@lt_t or @@lt_e) \ -    ? dbi_statement.pre_next(true,@image_src).to_s -    : dbi_statement.pre_next(false,@image_src).to_s -    limit=dbi_statement.sql_match_limit.to_s -    cgi.out{ -      @header.force_encoding("UTF-8") \ -      + @counters_txt.force_encoding("UTF-8") \ -      + @counters_endn.force_encoding("UTF-8") \ -      + canned.force_encoding("UTF-8") \ -      + @body_main.force_encoding("UTF-8") \ -      + canned.force_encoding("UTF-8") \ -      + @tail.force_encoding("UTF-8") -    } #% print cgi_output_header+counters+body -  end -  rescue Exception => e -    s='<pre>' + CGI::escapeHTML(e.backtrace.reverse.join("\n")) -    s << CGI::escapeHTML(e.message) + '</pre>' -    cgi.out{s} -    next -  ensure # eg. disconnect from server -    @conn.disconnect if @conn -  end -end diff --git a/misc/util/rb/tex/dr_tex.rb b/misc/util/rb/tex/dr_tex.rb deleted file mode 100755 index b71d8c1..0000000 --- a/misc/util/rb/tex/dr_tex.rb +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env ruby -# /+ -# - Name: Spine, Doc Reform [a part of] -#   - Description: documents, structuring, processing, publishing, search -#     - static content generator -# -#   - Author: Ralph Amissah -#     [ralph.amissah@gmail.com] -# -#   - Copyright: (C) 2015 - 2021 Ralph Amissah, All Rights -#     Reserved. 
-# -#   - License: AGPL 3 or later: -# -#     Spine (SiSU), a framework for document structuring, publishing and -#     search -# -#     Copyright (C) Ralph Amissah -# -#     This program is free software: you can redistribute it and/or modify it -#     under the terms of the GNU AFERO General Public License as published by the -#     Free Software Foundation, either version 3 of the License, or (at your -#     option) any later version. -# -#     This program is distributed in the hope that it will be useful, but WITHOUT -#     ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -#     FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -#     more details. -# -#     You should have received a copy of the GNU General Public License along with -#     this program. If not, see [https://www.gnu.org/licenses/]. -# -#     If you have Internet connection, the latest version of the AGPL should be -#     available at these locations: -#     [https://www.fsf.org/licensing/licenses/agpl.html] -#     [https://www.gnu.org/licenses/agpl.html] -# -#   - Spine (by Doc Reform, related to SiSU) uses standard: -#     - docReform markup syntax -#       - standard SiSU markup syntax with modified headers and minor modifications -#     - docReform object numbering -#       - standard SiSU object citation numbering & system -# -#   - Hompages: -#     [https://www.doc_reform.org] -#     [https://www.sisudoc.org] -# -#   - Git -#     [https://git.sisudoc.org/projects/?p=software/spine.git;a=summary] -# -# +/ -require 'fileutils' -pwd = Dir.pwd -argv,texfiles_with_path,flags=[],[],[] -lngs = %{(am|bg|bn|br|ca|cs|cy|da|de|el|en|eo|es|et|eu|fi|fr|ga|gl|he|hi|hr|hy|ia|is|it|ja|ko|la|lo|lt|lv|ml|mr|nl|no|nn|oc|pl|pt|pt_BR|ro|ru|sa|se|sk|sl|sq|sr|sv|ta|te|th|tk|tr|uk|ur|vi|zh)} -Regexp.new(lngs, Regexp::IGNORECASE) -argv=$* -argv.sort.each{|y| (y =~/^--\S+$/i) ? 
(flags << y) : (texfiles_with_path << y) } -if flags.length==0 \ -|| flags.inspect =~/"--help"/ -  cmd=(/([^\/]+)$/).match($0)[1] -  puts <<WOK -#{cmd} --help -#{cmd} --out=[output path] -#{cmd} --paper-size=a5 --out=~/test -WOK -end -// paper_size_orientation = (flags.inspect.match(/"--paper-size=(a4|a5|b5|letter|legal)"/)) ? $1 : "a4" -out_path = Dir.pwd -if (flags.inspect.match(/"--out=\S+"/)) -  out_path = flags.inspect.match(/"--out=(\S+)"/)[1] -  unless (FileTest.directory?(out_path)) -    puts "Creating output directory: --out=#{out_path}" -    FileUtils::mkdir_p(out_path) -    unless (FileTest.directory?(out_path)) -      puts "FAILS unable to create directory: #{out_path}" -      exit -    end -  end -end -if texfiles_with_path.length == 0 -  texfiles_with_path=Dir.glob('*.tex') -end -if texfiles_with_path.length > 0 -  texfiles_with_path.each do |texfile_with_path| -    if texfile_with_path =~/.+\.tex/ -      #puts texfile_with_path -      if FileTest.file?(texfile_with_path) -        file_basename_with_path = texfile_with_path.sub(/\.tex$/,'') -        file_basename = file_basename_with_path.sub(/.*?([^\/]+)$/,'\1') -        _out_path = out_path -        if file_basename =~ /\.#{lngs}$/ -          lng = file_basename.match(/\.#{lngs}$/)[1] -          puts file_basename -          puts lng -          puts _out_path -          unless _out_path.match(/\/#{lng}\/pdf$/) -            _out_path = "#{out_path}/#{lng}/pdf" -            FileUtils::mkdir_p(_out_path) -          end -        end -        texpdf_cmd = %{xetex -interaction=batchmode -fmt=xelatex #{texfile_with_path}\n} -        puts texpdf_cmd -        2.times { |i| system(texpdf_cmd) } -        if (FileTest.file?(%{#{pwd}/#{file_basename}.pdf})) && (FileTest.directory?(_out_path)) -          FileUtils::Verbose::mv(%{#{pwd}/#{file_basename}.pdf}, %{#{_out_path}/#{file_basename}.pdf}) -          puts (%{#{_out_path}/#{file_basename}.pdf}) -        else -          puts "issue with pdf file or output 
directory" -          puts "pdf file:   #{pwd}/#{file_basename}.pdf}" -          puts "output dir: #{_out_path}/" -        end -        suffix = ['log', 'out', 'toc', 'aux'] -        suffix.each { |s| FileUtils::rm_f(%{#{pwd}/#{file_basename}.#{s}})} -      end -    end -  end -end -Dir.chdir(pwd) -__END__ | 
