diff --git a/.gitignore b/.gitignore index 7bbc71c..d1f8edc 100644 --- a/.gitignore +++ b/.gitignore @@ -99,3 +99,6 @@ ENV/ # mypy .mypy_cache/ + +# Ignore directories in lexicon/ +lexicon/*/ \ No newline at end of file diff --git a/lexicon/readme.txt b/lexicon/readme.txt new file mode 100644 index 0000000..df62f29 --- /dev/null +++ b/lexicon/readme.txt @@ -0,0 +1 @@ +This directory contains the directories for all of your Lexicon games. diff --git a/lexipython.py b/lexipython.py old mode 100644 new mode 100755 index be9644e..c01864d --- a/lexipython.py +++ b/lexipython.py @@ -1,576 +1,188 @@ -############################### -## Lexipython Lexicon engine ## -############################### +#!/usr/bin/env python3 -import sys # For argv and stderr -import os # For reading directories -import re # For parsing lex content -import io # For writing pages out as UTF-8 -import networkx # For pagerank analytics -from collections import defaultdict # For rank inversion in statistics -from urllib import parse +import sys +if sys.version_info[0] < 3: + raise Exception("Lexipython requires Python 3") -# Short utility functions for handling titles +import argparse +import os +import re +import json +from src.article import LexiconArticle +from src import build +from src import utils -def titlecase(s): - """Enforces capitalization of titles.""" - s = s.strip() - return s[:1].capitalize() + s[1:] - -def titleescape(s): - """Makes an article title filename-safe.""" - s = s.strip() - s = re.sub(r"\s+", '_', s) # Replace whitespace with _ - s = parse.quote(s) # Encode all other characters - s = re.sub(r"%", "", s) # Strip encoding %s - if len(s) > 64: # If the result is unreasonably long, - s = hex(abs(hash(s)))[2:] # Replace it with a hex hash - return s - -def titlestrip(s): - """Strips certain prefixes for title sorting.""" - if s.startswith("The "): return s[4:] - if s.startswith("An "): return s[3:] - if s.startswith("A "): return s[2:] - return s - -# Main article class - -class LexiconArticle: +def is_lexicon(name): """ - A Lexicon article and its metadata. - - Members: - author string: the author of the article - turn integer: the turn the article was written for - title string: the article title - title_filesafe string: the title, escaped, used for filenames - content string: the HTML content, with citations replaced by format hooks - citations dict from format hook string to tuple of link alias and link target title - wcites list: titles of written articles cited - pcites list: titles of phantom articles cited - citedby list: titles of articles that cite this - The last three are filled in by populate(). + Checks whether the given folder is a Lexicon game. + Inputs: + name The Lexicon name to check. Assumed to be an existing folder. + Output: + Returns a tuple (result, msg, status), where result is True if the + given name is a Lexicon game and False otherwise, msg is the Lexicon's + status or an error message, and status is the status dictionary of the + Lexicon or None. """ - - def __init__(self, author, turn, title, content, citations): - """ - Creates a LexiconArticle object with the given parameters. - """ - self.author = author - self.turn = turn - self.title = title - self.title_filesafe = titleescape(title) - self.content = content - self.citations = citations - self.wcites = set() - self.pcites = set() - self.citedby = set() - - @staticmethod - def from_file_raw(raw_content): - """ - Parses the contents of a Lexipython source file into a LexiconArticle - object. 
If the source file is malformed, returns None. - """ - headers = raw_content.split('\n', 3) - if len(headers) != 4: - print("Header read error") - return None - author_header, turn_header, title_header, content_raw = headers - # Validate and sanitize the author header - if not author_header.startswith("# Author:"): - print("Author header missing") - return None - author = author_header[9:].strip() - # Validate and sanitize the turn header - if not turn_header.startswith("# Turn:"): - print("Turn header missing") - return None - turn = None + if not os.path.isfile(os.path.join("lexicon", name, "lexicon.cfg")): + return (False, "'{}' is not a Lexicon game, or its config file may be missing.".format(name), None) + if not os.path.isfile(os.path.join("lexicon", name, "status")): + return (True, "status missing", None) + with open(os.path.join("lexicon", name, "status")) as statusfile: + raw = statusfile.read() + if len(raw) == 0: + return (True, "unbuilt", {}) try: - turn = int(turn_header[7:].strip()) + status = json.loads(raw) except: - print("Turn header error") - return None - # Validate and sanitize the title header - if not title_header.startswith("# Title:"): - print("Title header missing") - return None - title = titlecase(title_header[8:]) - # Parse the content and extract citations - paras = re.split("\n\n+", content_raw.strip()) - content = "" - citations = {} - format_id = 1 - if not paras: - print("No content") - for para in paras: - # Escape angle brackets - para = re.sub("<", "<", para) - para = re.sub(">", ">", para) - # Replace bold and italic marks with tags - para = re.sub(r"//([^/]+)//", r"\1", para) - para = re.sub(r"\*\*([^*]+)\*\*", r"\1", para) - # Replace \\LF with
<br>LF - para = re.sub(r"\\\\\n", "<br>
\n", para) - # Abstract citations into the citation record - link_match = re.search(r"\[\[(([^|\[\]]+)\|)?([^|\[\]]+)\]\]", para) - while link_match: - # Identify the citation text and cited article - cite_text = link_match.group(2) if link_match.group(2) else link_match.group(3) - cite_title = titlecase(link_match.group(3)) - # Record the citation - citations["c"+str(format_id)] = (cite_text, cite_title) - # Stitch the format id in place of the citation - para = para[:link_match.start(0)] + "{c"+str(format_id)+"}" + para[link_match.end(0):] - format_id += 1 # Increment to the next format citation - link_match = re.search(r"\[\[(([^|\[\]]+)\|)?([^|\[\]]+)\]\]", para) - # Convert signature to right-aligned - if para[:1] == '~': - para = "

" + para[1:] + "

\n" - else: - para = "

" + para + "

\n" - content += para - return LexiconArticle(author, turn, title, content, citations) + return (True, "status corrupted", None) + return (True, "ye", status) # TODO + return (False, "Error checking Lexicon status", None) - def build_page_content(self): - """ - Formats citations into the article content as normal HTML links - and returns the result. - """ - format_map = { - format_id: "{0}".format( - cite_tuple[0], titleescape(cite_tuple[1]), - "" if cite_tuple[1] in self.wcites else " class=\"phantom\"") - for format_id, cite_tuple in self.citations.items() - } - return self.content.format(**format_map) - - def build_page_citeblock(self, prev_target, next_target): - """ - Builds the citeblock content HTML for use in regular article pages. - For each defined target, links the target page as Previous or Next. - """ - citeblock = "
\n" - # Prev/next links - if next_target is not None: - citeblock += "

Next →

\n".format(titleescape(next_target)) - if prev_target is not None: - citeblock += "

← Previous

\n".format(titleescape(prev_target)) - elif next_target is not None: - citeblock += "

 

\n" - # Citations - cites_links = [ - "{0}".format( - title, titleescape(title), - "" if title in self.wcites else " class=\"phantom\"") - for title in sorted(self.wcites | self.pcites)] - cites_str = " | ".join(cites_links) - if len(cites_str) < 1: cites_str = "--" - citeblock += "

Citations: {}

\n".format(cites_str) - # Citedby - citedby_links = [ - "{0}".format( - title, titleescape(title)) - for title in self.citedby] - citedby_str = " | ".join(citedby_links) - if len(citedby_str) < 1: citedby_str = "--" - citeblock += "

Cited by: {}

\n
\n".format(citedby_str) - return citeblock - -# Parsing functions for source intake - -def parse_from_directory(directory): +def overview_all(): """ - Reads and parses each source file in the given directory. - Input: directory, the path to the folder to read - Output: a list of parsed articles + Prints the names and statuses of all extant Lexicons, + or a short help message if none have been created yet. """ - articles = [] - print("Reading source files from", directory) - for filename in os.listdir(directory): - path = directory + filename - # Read only .txt files - if filename[-4:] == ".txt": - print(" Parsing", filename) - with open(path, "r", encoding="utf8") as src_file: - raw = src_file.read() - article = LexiconArticle.from_file_raw(raw) - if article is None: - print(" ERROR") - else: - print(" success:", article.title) - articles.append(article) - return articles - -def populate(lexicon_articles): - """ - Given a list of lexicon articles, fills out citation information - for each article and creates phantom pages for missing articles. - """ - article_by_title = {article.title : article for article in lexicon_articles} - # Determine all articles that exist or should exist - extant_titles = set([citation[1] for article in lexicon_articles for citation in article.citations]) - # Interlink all citations - for article in lexicon_articles: - for cite_tuple in article.citations.values(): - target = cite_tuple[1] - # Create article objects for phantom citations - if target not in article_by_title: - article_by_title[target] = LexiconArticle(None, sys.maxsize, target, "

This entry hasn't been written yet.

", {}) - # Interlink citations - if article_by_title[target].author is None: - article.pcites.add(target) - else: - article.wcites.add(target) - article_by_title[target].citedby.add(article.title) - return list(article_by_title.values()) - -def load_resource(filename, cache={}): - """Loads files from the resources directory with caching.""" - if filename not in cache: - cache[filename] = open("resources/" + filename, "r", encoding="utf8").read() - return cache[filename] - -def load_config(): - """Loads values from the config file.""" - config = {} - with open("lexicon.cfg", "r", encoding="utf8") as f: - line = f.readline() - while line: - # Skim lines until a value definition begins - conf_match = re.match(">>>([^>]+)>>>\s+", line) - if not conf_match: - line = f.readline() - continue - # Accumulate the conf value until the value ends - conf = conf_match.group(1) - conf_value = "" - line = f.readline() - conf_match = re.match("<<<{0}<<<\s+".format(conf), line) - while line and not conf_match: - conf_value += line - line = f.readline() - conf_match = re.match("<<<{0}<<<\s+".format(conf), line) - if not line: - raise SystemExit("Reached EOF while reading config value {}".format(conf)) - config[conf] = conf_value.strip() - # Check that all necessary values were configured - for config_value in ['LEXICON_TITLE', 'PROMPT', 'SESSION_PAGE', "INDEX_LIST"]: - if config_value not in config: - raise SystemExit("Error: {} not set in lexipython.cfg".format(config_value)) - return config - -# Build functions - -def build_contents_page(articles, config): - """ - Builds the full HTML of the contents page. - """ - content = "" - # Article counts - phantom_count = len([article for article in articles if article.author is None]) - if phantom_count == 0: - content = "

There are {0} entries in this lexicon.

\n".format(len(articles)) + # Scan the directory + lexicon_names = [] + with os.scandir("lexicon") as lexicons: + for entry in lexicons: + if entry.is_dir(): + result, msg, status = is_lexicon(entry.name) + if result: + lexicon_names.append((entry.name, msg)) + # Print the results + if len(lexicon_names) > 0: + l = max([len(name) for name, msg in lexicon_names]) + 4 + print("Lexicons:") + for name, msg in sorted(lexicon_names): + print(" {}{}{}".format(name, " " * (l - len(name)), msg)) else: - content = "

There are {0} entries, {1} written and {2} phantom.

\n".format( - len(articles), len(articles) - phantom_count, phantom_count) - # Prepare article links - link_by_title = {article.title : "{0}".format( - article.title, article.title_filesafe, - "" if article.author is not None else " class=\"phantom\"") - for article in articles} - # Write the articles in alphabetical order - content += load_resource("contents.html") - content += "
\n\n
\n" - # Write the articles in turn order - content += "
\n\n
\n" - # Fill in the page skeleton - entry_skeleton = load_resource("entry-page.html") - css = load_resource("lexicon.css") - return entry_skeleton.format( - title="Index of " + config["LEXICON_TITLE"], - lexicon=config["LEXICON_TITLE"], - css=css, - logo=config["LOGO_FILENAME"], - prompt=config["PROMPT"], - content=content, - citeblock="") + print("There are no Lexicons yet. Create one with:\n\n"\ + " lexipython.py [name] init\n") -def build_rules_page(config): +def overview_one(name): """ - Builds the full HTML of the rules page. + Prints the status and summary information for the Lexicon with the + given name. """ - content = load_resource("rules.html") - # Fill in the entry skeleton - entry_skeleton = load_resource("entry-page.html") - css = load_resource("lexicon.css") - return entry_skeleton.format( - title="Rules", - lexicon=config["LEXICON_TITLE"], - css=css, - logo=config["LOGO_FILENAME"], - prompt=config["PROMPT"], - content=content, - citeblock="") - -def build_formatting_page(config): - """ - Builds the full HTML of the formatting page. - """ - content = load_resource("formatting.html") - # Fill in the entry skeleton - entry_skeleton = load_resource("entry-page.html") - css = load_resource("lexicon.css") - return entry_skeleton.format( - title="Formatting", - lexicon=config["LEXICON_TITLE"], - css=css, - logo=config["LOGO_FILENAME"], - prompt=config["PROMPT"], - content=content, - citeblock="") - -def build_session_page(config): - """ - Builds the full HTML of the session page. - """ - # Fill in the entry skeleton - entry_skeleton = load_resource("entry-page.html") - css = load_resource("lexicon.css") - return entry_skeleton.format( - title=config["LEXICON_TITLE"], - lexicon=config["LEXICON_TITLE"], - css=css, - logo=config["LOGO_FILENAME"], - prompt=config["PROMPT"], - content=config["SESSION_PAGE"], - citeblock="") - -def build_statistics_page(articles, config): - """ - Builds the full HTML of the statistics page. - """ - content = "" - cite_map = {article.title : [cite_tuple[1] for cite_tuple in article.citations.values()] for article in articles} - # Pages by pagerank - content += "
\n" - content += "

Top 10 pages by page rank:
\n" - G = networkx.Graph() - for citer, citeds in cite_map.items(): - for cited in citeds: - G.add_edge(citer, cited) - ranks = networkx.pagerank(G) - sranks = sorted(ranks.items(), key=lambda x: x[1], reverse=True) - ranking = list(enumerate(map(lambda x: x[0], sranks))) - content += "
\n".join(map(lambda x: "{0} – {1}".format(x[0]+1, x[1]), ranking[:10])) - content += "

\n" - content += "
\n" - # Top numebr of citations made - content += "
\n" - content += "

Most citations made from:
\n" - citation_tally = [(kv[0], len(kv[1])) for kv in cite_map.items()] - citation_count = defaultdict(list) - for title, count in citation_tally: citation_count[count].append(title) - content += "
\n".join(map( - lambda kv: "{0} – {1}".format(kv[0], "; ".join(kv[1])), - sorted(citation_count.items(), reverse=True)[:3])) - content += "

\n" - content += "
\n" - # Top number of times cited - content += "
\n" - content += "

Most citations made to:
\n" - all_cited = set([title for cites in cite_map.values() for title in cites]) - cited_by_map = { cited: [citer for citer in cite_map.keys() if cited in cite_map[citer]] for cited in all_cited } - cited_tally = [(kv[0], len(kv[1])) for kv in cited_by_map.items()] - cited_count = defaultdict(list) - for title, count in cited_tally: cited_count[count].append(title) - content += "
\n".join(map( - lambda kv: "{0} – {1}".format(kv[0], "; ".join(kv[1])), - sorted(cited_count.items(), reverse=True)[:3])) - content += "

\n" - content += "
\n" - # Author pageranks - content += "
\n" - content += "

Author total page rank:
\n" - authors = sorted(set([article.author for article in articles if article.author is not None])) - articles_by = {author : [a for a in articles if a.author == author] for author in authors} - author_rank = {author : sum(map(lambda a: ranks[a.title], articles)) for author, articles in articles_by.items()} - content += "
\n".join(map( - lambda kv: "{0} – {1}".format(kv[0], round(kv[1], 3)), - sorted(author_rank.items(), key=lambda t:-t[1]))) - content += "

\n" - content += "
\n" - # Author citations made - content += "
\n" - content += "

Citations made by author
\n" - author_cite_count = {author : sum(map(lambda a:len(a.wcites | a.pcites), articles)) for author, articles in articles_by.items()} - content += "
\n".join(map( - lambda kv: "{0} – {1}".format(kv[0], kv[1]), - sorted(author_cite_count.items(), key=lambda t:-t[1]))) - content += "

\n" - content += "
\n" - # Author cited count - content += "
\n" - content += "

Citations made to author
\n" - cited_times = {author : 0 for author in authors} - for article in articles: - if article.author is not None: - cited_times[article.author] += len(article.citedby) - content += "
\n".join(map( - lambda kv: "{0} – {1}".format(kv[0], kv[1]), - sorted(cited_times.items(), key=lambda t:-t[1]))) - content += "

\n" - content += "
\n" - - # Fill in the entry skeleton - entry_skeleton = load_resource("entry-page.html") - css = load_resource("lexicon.css") - return entry_skeleton.format( - title="Statistics", - lexicon=config["LEXICON_TITLE"], - css=css, - logo=config["LOGO_FILENAME"], - prompt=config["PROMPT"], - content=content, - citeblock="") - -def build_graphviz_file(cite_map): - """ - Builds a citation graph in dot format for Graphviz. - """ - result = [] - result.append("digraph G {\n") - # Node labeling - written_entries = list(cite_map.keys()) - phantom_entries = set([title for cites in cite_map.values() for title in cites if title not in written_entries]) - node_labels = [title[:20] for title in written_entries + list(phantom_entries)] - node_names = [hash(i) for i in node_labels] - for i in range(len(node_labels)): - result.append("{} [label=\"{}\"];\n".format(node_names[i], node_labels[i])) - # Edges - for citer in written_entries: - for cited in cite_map[citer]: - result.append("{}->{};\n".format(hash(citer[:20]), hash(cited[:20]))) - # Return result - result.append("overlap=false;\n}\n") - return "".join(result)#"…" - -# Summative functions - -def command_build(argv): - if len(argv) >= 3 and (argv[2] != "partial" and argv[2] != "full"): - print("unknown build type: " + argv[2]) + # Verify the name + if not os.path.isdir(os.path.join("lexicon", name)): + print("Error: There is no Lexicon named '{}'.".format(name)) return - # Load content - config = load_config() - entry_skeleton = load_resource("entry-page.html") - css = load_resource("lexicon.css") - articles = [article for article in parse_from_directory("raw/") if article is not None] - written_titles = [article.title for article in articles] - articles = sorted(populate(articles), key=lambda a: (a.turn, a.title)) - #print(articles[13].title_filesafe) - #return - phantom_titles = [article.title for article in articles if article.title not in written_titles] - - # Write the redirect page - print("Writing redirect page...") - with open("out/index.html", "w", encoding="utf8") as f: - f.write(load_resource("redirect.html").format(lexicon=config["LEXICON_TITLE"])) - - # Write the article pages - print("Deleting old article pages...") - for filename in os.listdir("out/article/"): - if filename[-5:] == ".html": - os.remove("out/article/" + filename) - print("Writing article pages...") - l = len(articles) - for idx in range(l): - article = articles[idx] - with open("out/article/" + article.title_filesafe + ".html", "w", encoding="utf8") as f: - content = article.build_page_content() - citeblock = article.build_page_citeblock( - None if idx == 0 else articles[idx - 1].title, - None if idx == l-1 else articles[idx + 1].title) - article_html = entry_skeleton.format( - title = article.title, - lexicon = config["LEXICON_TITLE"], - css = css, - logo = config["LOGO_FILENAME"], - prompt = config["PROMPT"], - content = content, - citeblock = citeblock) - f.write(article_html) - print(" Wrote " + article.title) - - # Write default pages - print("Writing default pages...") - with open("out/contents/index.html", "w", encoding="utf8") as f: - f.write(build_contents_page(articles, config)) - print(" Wrote Contents") - with open("out/rules/index.html", "w", encoding="utf8") as f: - f.write(build_rules_page(config)) - print(" Wrote Rules") - with open("out/formatting/index.html", "w", encoding="utf8") as f: - f.write(build_formatting_page(config)) - print(" Wrote Formatting") - with open("out/session/index.html", "w", encoding="utf8") as f: - 
f.write(build_session_page(config)) - print(" Wrote Session") - with open("out/statistics/index.html", "w", encoding="utf8") as f: - f.write(build_statistics_page(articles, config)) - print(" Wrote Statistics") + result, msg, status = is_lexicon(name) + if not result: + print("Error: " + msg) + return + # Print status and summary + print(msg) + print(status) + # TODO - # Write auxiliary files - # TODO: write graphviz file - # TODO: write compiled lexicon page +def run_command(name, command): + """ + Runs a command on a Lexicon. + """ + if command == "init": + # Check that the folder isn't already there + if os.path.exists(os.path.join("lexicon", name)): + print("Error: Can't create '{}', it already exists.".format(name)) + return + # Create the Lexicon + command_init(name) + return + elif command == "build": + if not os.path.exists(os.path.join("lexicon", name)): + print("Error: There is no Lexicon named '{}'.".format(name)) + return + result, msg, status = is_lexicon(name) + if not result: + print("Error: " + msg) + return + # Build the Lexicon + command_build(name) + return + elif command == "run": + if not os.path.exists(os.path.join("lexicon", name)): + print("Error: There is no Lexicon named '{}'.".format(name)) + return + result, msg, status = is_lexicon(name) + if not result: + print("Error: " + msg) + return + # Run a server managing the Lexicon + command_run(name) + return + else: + print("Error: '{}' is not a valid command.".format(command)) + return + +def command_init(name): + """ + Sets up a Lexicon game with the given name. + """ + # Create the folder structure + lex_path = os.path.join("lexicon", name) + os.mkdir(lex_path) + os.mkdir(os.path.join(lex_path, "src")) + os.mkdir(os.path.join(lex_path, "article")) + os.mkdir(os.path.join(lex_path, "contents")) + os.mkdir(os.path.join(lex_path, "formatting")) + os.mkdir(os.path.join(lex_path, "rules")) + os.mkdir(os.path.join(lex_path, "session")) + os.mkdir(os.path.join(lex_path, "statistics")) + # Open the default config file + config = utils.load_resource("lexicon.cfg") + # Edit the name field + config = re.sub("Lexicon Title", "Lexicon {}".format(name), config) + # Create the Lexicon's config file + with open(os.path.join(lex_path, "lexicon.cfg"), "w") as config_file: + config_file.write(config) + # Create an example page + with open(os.path.join(lex_path, "src", "example-page.txt"), "w") as destfile: + destfile.write(utils.load_resource("example-page.txt")) + # Create an empty status file + open(os.path.join(lex_path, "status"), "w").close() + print("Created Lexicon {}".format(name)) + # Done initializing + return + +def command_build(name): + """ + Rebuilds the browsable pages of a Lexicon. + """ + build.build_all("lexicon", name) + +def command_run(name): + """ + Runs as a server managing a Lexicon. 
+ """ + print("Not implemented") def main(): - if len(sys.argv) < 2: - print("Available commands:") - print(" - build [partial] : Build the lexicon and generate phantom stubs for all unwritten articles.") - print(" - build full : Build the lexicon and generate Ersatz pages for all unwritten articles.") - elif sys.argv[1] == "build": - command_build(sys.argv) + parser = argparse.ArgumentParser( + description="Lexipython is a Python application for playing the Lexicon RPG.", + epilog="Run lexipython.py without arguments to list the extant Lexicons.\n\n"\ + "Available commands:\n\n"\ + " init Create a Lexicon with the provided name\n"\ + " build Build the Lexicon, then exit\n"\ + " run Launch a persistent server managing the Lexicon\n", + formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument("name", help="The name of the Lexicon to operate on", + nargs="?", default=None) + parser.add_argument("command", help="The operation to perform on the Lexicon", + nargs="?", default=None) + args = parser.parse_args() + + # If no Lexicon as specified + if args.name is None: + overview_all() + # If no command was specified + elif args.command is None: + overview_one(args.name) + # A command was specified else: - print("Unknown command: " + sys.argv[1]) + run_command(args.name, args.command) if __name__ == "__main__": main() diff --git a/out/article/Example_page.html b/out/article/Example_page.html deleted file mode 100644 index 575b4d5..0000000 --- a/out/article/Example_page.html +++ /dev/null @@ -1,49 +0,0 @@ - - -Example page | Lexicon Title - - - - - -
-

Example page

-

This is an example page. -Some words are italicized, -and some words are bolded. -All of these sentences are part of the same paragraph.

-

This is a new paragraph.
-Unlike the last paragraph, this line will be after a line break within the paragraph.

-

This is an example citation. You can also cite a phantom page with just the title.

-

Dr. X. Amplepage

-
-
-

Next →

-

 

-

Citations: Phantom page

-

Cited by: --

-
- - \ No newline at end of file diff --git a/out/article/Phantom_page.html b/out/article/Phantom_page.html deleted file mode 100644 index 91530aa..0000000 --- a/out/article/Phantom_page.html +++ /dev/null @@ -1,40 +0,0 @@ - - -Phantom page | Lexicon Title - - - - - -
-

Phantom page

-

This entry hasn't been written yet.

-
-

← Previous

-

Citations: --

-

Cited by: Example page

-
- - \ No newline at end of file diff --git a/out/contents/index.html b/out/contents/index.html deleted file mode 100644 index e8121da..0000000 --- a/out/contents/index.html +++ /dev/null @@ -1,75 +0,0 @@ - - -Index of Lexicon Title | Lexicon Title - - - - - -
-

Index of Lexicon Title

-

There are 2 entries, 1 written and 1 phantom.

- - -
- -
- -
- - \ No newline at end of file diff --git a/out/formatting/index.html b/out/formatting/index.html deleted file mode 100644 index d20e31e..0000000 --- a/out/formatting/index.html +++ /dev/null @@ -1,58 +0,0 @@ - - -Formatting | Lexicon Title - - - - - -
-

Formatting

-

Lexipython provides support for a limited amount of Markdown-esque formatting.

-
-# Author: Authorname
-# Turn: 1
-# Title: Example page
-
-This is an example page.
-Some words are //italicized//,
-and some words are **bolded**.
-All of these sentences are part of the same paragraph.
-
-This is a new paragraph.\\
-Unlike the last paragraph, this line will be after a line break within the paragraph.
-
-This is an [[example citation|Phantom page]]. You can also cite a [[phantom page]] with just the title.
-
-~Dr. X. Amplepage
-
-

Each turn, fill out the header with your author information, the current turn, and the title of your entry. It doesn't really matter what the Author field is, except that it must be the same across all articles you write.

-

Two line breaks begins a new paragraph. A single line break does nothing, unless the line is neded by a double backslash (\\).

-

Text bounded by ** will be bolded: **bold** produces bold. Text bounded by // will be italicized: //italics// produces italics.

-

To cite another Lexicon entry, use double brackets. Text in double brackets will cite and link to the entry of the same name: [[Example page]] produces Example page. Text in double brackets split with a | will alias the link as the left text and link to the entry with the name of the right text: [[this text|Example page]] produces this text. You must be precise in the entry title you cite to. Citations to "Example" vs. "The Example" will point to different entries and create different phantoms, and your GM will probably have to clean up after you.

-

Beginning a paragraph with ~ will right-align it and place a horizontal line above it. Use this for signing your entry with your scholar's name.

-
- - \ No newline at end of file diff --git a/out/index.html b/out/index.html deleted file mode 100644 index b676732..0000000 --- a/out/index.html +++ /dev/null @@ -1,9 +0,0 @@ - - -Lexicon Title - - - -

Redirecting to Lexicon Title...

- - \ No newline at end of file diff --git a/out/rules/index.html b/out/rules/index.html deleted file mode 100644 index 48b2004..0000000 --- a/out/rules/index.html +++ /dev/null @@ -1,53 +0,0 @@ - - -Rules | Lexicon Title - - - - - -
-

Rules

-
    -
  1. At the beginning of the game, you will be provided with a topic statement that sets the tone for the game. Use it for inspiration and a stepping-stone into shaping the world of the Lexicon.
  2. -
  3. Each round, you will be assigned an index, a grouping of letters. Your entry must alphabetize under that index.
      -
    1. Each index has a number of open slots equal to the number of players, which are taken up by article titles when an article is written in that index or a citation is made to an unwritten article, or phantom. If there are no open slots in your index, you must write the article for a phantom in that index.
    2. -
    3. "The" and "A" aren't counted in indexing.
  4. -
  5. Once you've picked an article title, write your article on that subject.
      -
    1. There are no hard and fast rules about style. Try to sound like an encyclopedia entry or the overview section at the top of a wiki article.
    2. -
    3. You must respect and not contradict any factual content of any posted articles. You may introduce new facts that place things in a new light, provide alternative interpretations, or flesh out unexplained details in unexpected ways; but you must not contradict what has been previously established as fact.
    4. -
    5. Aim for around 200-300 words.
  6. -
  7. Your article must cite other articles in the Lexicon. Sometimes these citations will be to phantoms, articles that have not been written yet.
      -
    1. On the first turn, your article must cite exactly two phantom articles.
    2. -
    3. On subsequent turns, your article must cite exactly two phantom articles, either already-cited phantoms or new ones. Your article must also cite at least one written article.
    4. -
    5. On the penultimate turn, you must cite exactly one phantom article and at least two written articles.
    6. -
    7. On the final turn, you must cite at least three written articles.
    8. -
    9. You may not cite an entry you wrote. You may cite phantoms you have cited before.
    10. -
    11. Once you cite a phantom, you cannot choose to write it if you write an article for that index later.
  8. - -

    Ersatz Scrivener. In the course of the game, it may come to pass that a scholar is assigned an index in which no slots are available, because this scholar has already cited all the phantoms in previous articles. When this happens, the player instead writes their article as Ersatz Scrivener, radical skeptic. Ersatz does not believe in the existence of whatever he is writing about, no matter how obvious it seems to others or how central it is in the developing history of the world. All references, testimony, etc. with regard to its existence are tragic delusion at best or malicious lies at worst. Unlike the other scholars, Ersatz does not treat the research of his peers as fact, because he does not believe he has peers. Players writing articles as Ersatz are encouraged to name and shame the work of the misguided amateurs collaborating with him.

    -
- - \ No newline at end of file diff --git a/out/session/index.html b/out/session/index.html deleted file mode 100644 index 87556ed..0000000 --- a/out/session/index.html +++ /dev/null @@ -1,35 +0,0 @@ - - -Lexicon Title | Lexicon Title - - - - - -
-

Lexicon Title

-

Put session information here, like the index grouping and turn count, where to send completed entries, index assignments, turn schedule, and so on.

- - \ No newline at end of file diff --git a/out/statistics/index.html b/out/statistics/index.html deleted file mode 100644 index 148e62f..0000000 --- a/out/statistics/index.html +++ /dev/null @@ -1,61 +0,0 @@ - - -Statistics | Lexicon Title - - - - - -
-

Statistics

-
-

Top 10 pages by page rank:
-1 – Example page
-2 – Phantom page

-
-
-

Most citations made from:
-2 – Example page
-0 – Phantom page

-
-
-

Most citations made to:
-1 – Phantom page

-
-
-

Author total page rank:
-Authorname – 0.5

-
-
-

Citations made by author
-Authorname – 1

-
-
-

Citations made to author
-Authorname – 0

-
-
- - \ No newline at end of file diff --git a/resources/contents.html b/resources/contents.html deleted file mode 100644 index 42e3b19..0000000 --- a/resources/contents.html +++ /dev/null @@ -1,21 +0,0 @@ - - diff --git a/resources/redirect.html b/resources/redirect.html deleted file mode 100644 index e20197a..0000000 --- a/resources/redirect.html +++ /dev/null @@ -1,9 +0,0 @@ - - -{lexicon} - - - -

Redirecting to {lexicon}...

- - \ No newline at end of file diff --git a/src/article.py b/src/article.py new file mode 100644 index 0000000..a4aeffc --- /dev/null +++ b/src/article.py @@ -0,0 +1,201 @@ +import os +import sys +import re +import src.utils as utils + +class LexiconArticle: + """ + A Lexicon article and its metadata. + + Members: + player string: the player of the article + turn integer: the turn the article was written for + title string: the article title + title_filesafe string: the title, escaped, used for filenames + content string: the HTML content, with citations replaced by format hooks + citations dict mapping format hook string to tuple of link alias and link target title + wcites list: titles of written articles cited + pcites list: titles of phantom articles cited + citedby list: titles of articles that cite this + The last three are filled in by populate(). + """ + + def __init__(self, player, turn, title, content, citations): + """ + Creates a LexiconArticle object with the given parameters. + """ + self.player = player + self.turn = turn + self.title = title + self.title_filesafe = utils.titleescape(title) + self.content = content + self.citations = citations + self.wcites = set() + self.pcites = set() + self.citedby = set() + + @staticmethod + def from_file_raw(raw_content): + """ + Parses the contents of a Lexipython source file into a LexiconArticle + object. If the source file is malformed, returns None. + """ + headers = raw_content.split('\n', 3) + if len(headers) != 4: + print("Header read error") + return None + player_header, turn_header, title_header, content_raw = headers + # Validate and sanitize the player header + if not player_header.startswith("# Player:"): + print("Player header missing or corrupted") + return None + player = player_header[9:].strip() + # Validate and sanitize the turn header + if not turn_header.startswith("# Turn:"): + print("Turn header missing or corrupted") + return None + turn = None + try: + turn = int(turn_header[7:].strip()) + except: + print("Turn header error") + return None + # Validate and sanitize the title header + if not title_header.startswith("# Title:"): + print("Title header missing or corrupted") + return None + title = utils.titlecase(title_header[8:]) + # Parse the content and extract citations + paras = re.split("\n\n+", content_raw.strip()) + content = "" + citations = {} + format_id = 1 + if not paras: + print("No content") + for para in paras: + # Escape angle brackets + para = re.sub("<", "<", para) + para = re.sub(">", ">", para) + # Replace bold and italic marks with tags + para = re.sub(r"//([^/]+)//", r"\1", para) + para = re.sub(r"\*\*([^*]+)\*\*", r"\1", para) + # Replace \\LF with
<br>LF + para = re.sub(r"\\\\\n", "<br>
\n", para) + # Abstract citations into the citation record + link_match = re.search(r"\[\[(([^|\[\]]+)\|)?([^|\[\]]+)\]\]", para) + while link_match: + # Identify the citation text and cited article + cite_text = link_match.group(2) if link_match.group(2) else link_match.group(3) + cite_title = utils.titlecase(re.sub(r"\s+", " ", link_match.group(3))) + # Record the citation + citations["c"+str(format_id)] = (cite_text, cite_title) + # Stitch the format id in place of the citation + para = para[:link_match.start(0)] + "{c"+str(format_id)+"}" + para[link_match.end(0):] + format_id += 1 # Increment to the next format citation + link_match = re.search(r"\[\[(([^|\[\]]+)\|)?([^|\[\]]+)\]\]", para) + # Convert signature to right-aligned + if para[:1] == '~': + para = "

" + para[1:] + "

\n" + else: + para = "

" + para + "

\n" + content += para + return LexiconArticle(player, turn, title, content, citations) + + @staticmethod + def parse_from_directory(directory): + """ + Reads and parses each source file in the given directory. + Input: directory, the path to the folder to read + Output: a list of parsed articles + """ + articles = [] + print("Reading source files from", directory) + for filename in os.listdir(directory): + path = os.path.join(directory, filename) + # Read only .txt files + if filename[-4:] == ".txt": + print(" Parsing", filename) + with open(path, "r", encoding="utf8") as src_file: + raw = src_file.read() + article = LexiconArticle.from_file_raw(raw) + if article is None: + print(" ERROR") + else: + print(" success:", article.title) + articles.append(article) + return articles + + @staticmethod + def populate(lexicon_articles): + """ + Given a list of lexicon articles, fills out citation information + for each article and creates phantom pages for missing articles. + """ + article_by_title = {article.title : article for article in lexicon_articles} + # Determine all articles that exist or should exist + extant_titles = set([citation[1] for article in lexicon_articles for citation in article.citations]) + # Interlink all citations + for article in lexicon_articles: + for cite_tuple in article.citations.values(): + target = cite_tuple[1] + # Create article objects for phantom citations + if target not in article_by_title: + article_by_title[target] = LexiconArticle(None, sys.maxsize, target, "

This entry hasn't been written yet.

", {}) + # Interlink citations + if article_by_title[target].player is None: + article.pcites.add(target) + else: + article.wcites.add(target) + article_by_title[target].citedby.add(article.title) + return list(article_by_title.values()) + + def build_default_content(self): + """ + Formats citations into the article content as normal HTML links + and returns the result. + """ + format_map = { + format_id: "{0}".format( + cite_tuple[0], utils.titleescape(cite_tuple[1]), + "" if cite_tuple[1] in self.wcites else " class=\"phantom\"") + for format_id, cite_tuple in self.citations.items() + } + return self.content.format(**format_map) + + def build_default_citeblock(self, prev_article, next_article): + """ + Builds the citeblock content HTML for use in regular article pages. + For each defined target, links the target page as Previous or Next. + """ + citeblock = "
\n" + # Prev/next links + if next_article is not None: + citeblock += "

Next →

\n".format( + next_article.title_filesafe, " class=\"phantom\"" if next_article.player is None else "") + if prev_article is not None: + citeblock += "

← Previous

\n".format( + prev_article.title_filesafe, " class=\"phantom\"" if prev_article.player is None else "") + if next_article is None and prev_article is None: + citeblock += "

 

\n" + # Citations + cites_links = [ + "{0}".format( + title, utils.titleescape(title), + "" if title in self.wcites else " class=\"phantom\"") + for title in sorted( + self.wcites | self.pcites, + key=lambda t: utils.titlesort(t))] + cites_str = " | ".join(cites_links) + if len(cites_str) < 1: cites_str = "—" + citeblock += "

Citations: {}

\n".format(cites_str) + # Citedby + citedby_links = [ + "{0}".format( + title, utils.titleescape(title)) + for title in sorted( + self.citedby, + key=lambda t: utils.titlesort(t))] + citedby_str = " | ".join(citedby_links) + if len(citedby_str) < 1: citedby_str = "—" + citeblock += "

Cited by: {}

\n
\n".format(citedby_str) + return citeblock diff --git a/src/build.py b/src/build.py new file mode 100644 index 0000000..f49f8d6 --- /dev/null +++ b/src/build.py @@ -0,0 +1,425 @@ +import sys # For argv and stderr +import os # For reading directories +import re # For parsing lex content +import io # For writing pages out as UTF-8 +import networkx # For pagerank analytics +from collections import defaultdict # For rank inversion in statistics + +from src import utils +from src.article import LexiconArticle + +def build_contents_page(articles, config): + """ + Builds the full HTML of the contents page. + """ + content = "" + # Head the contents page with counts of written and phantom articles + phantom_count = len([article for article in articles if article.player is None]) + if phantom_count == 0: + content = "

There are {0} entries in this lexicon.

\n".format(len(articles)) + else: + content = "

There are {0} entries, {1} written and {2} phantom.

\n".format( + len(articles), len(articles) - phantom_count, phantom_count) + # Prepare article links + link_by_title = {article.title : "{0}".format( + article.title, article.title_filesafe, + " class=\"phantom\"" if article.player is None else "") + for article in articles} + # Write the articles in alphabetical order + content += utils.load_resource("contents.html") + content += "
\n\n
\n" + # Write the articles in turn order + content += "
\n\n
\n" + # Fill in the page skeleton + entry_skeleton = utils.load_resource("entry-page.html") + css = utils.load_resource("lexicon.css") + return entry_skeleton.format( + title="Index of " + config["LEXICON_TITLE"], + lexicon=config["LEXICON_TITLE"], + css=css, + logo=config["LOGO_FILENAME"], + prompt=config["PROMPT"], + sort=config["DEFAULT_SORT"], + content=content, + citeblock="") + +def build_rules_page(config): + """ + Builds the full HTML of the rules page. + """ + content = utils.load_resource("rules.html") + # Fill in the entry skeleton + entry_skeleton = utils.load_resource("entry-page.html") + css = utils.load_resource("lexicon.css") + return entry_skeleton.format( + title="Rules", + lexicon=config["LEXICON_TITLE"], + css=css, + logo=config["LOGO_FILENAME"], + prompt=config["PROMPT"], + sort=config["DEFAULT_SORT"], + content=content, + citeblock="") + +def build_formatting_page(config): + """ + Builds the full HTML of the formatting page. + """ + content = utils.load_resource("formatting.html") + # Fill in the entry skeleton + entry_skeleton = utils.load_resource("entry-page.html") + css = utils.load_resource("lexicon.css") + return entry_skeleton.format( + title="Formatting", + lexicon=config["LEXICON_TITLE"], + css=css, + logo=config["LOGO_FILENAME"], + prompt=config["PROMPT"], + sort=config["DEFAULT_SORT"], + content=content, + citeblock="") + +def build_session_page(config): + """ + Builds the full HTML of the session page. + """ + # Fill in the entry skeleton + entry_skeleton = utils.load_resource("entry-page.html") + css = utils.load_resource("lexicon.css") + return entry_skeleton.format( + title=config["LEXICON_TITLE"], + lexicon=config["LEXICON_TITLE"], + css=css, + logo=config["LOGO_FILENAME"], + prompt=config["PROMPT"], + sort=config["DEFAULT_SORT"], + content=config["SESSION_PAGE"], + citeblock="") + +def build_statistics_page(articles, config): + """ + Builds the full HTML of the statistics page. + """ + content = "" + cite_map = { + article.title : [ + cite_tuple[1] + for cite_tuple in article.citations.values()] + for article in articles} + + # Pages by pagerank + content += "
\n" + content += "

Top 10 pages by page rank:
\n" + G = networkx.Graph() + for citer, citeds in cite_map.items(): + for cited in citeds: + G.add_edge(citer, cited) + ranks = networkx.pagerank(G) + sranks = sorted(ranks.items(), key=lambda x: x[1], reverse=True) + ranking = list(enumerate(map(lambda x: x[0], sranks))) + content += "
\n".join(map(lambda x: "{0} – {1}".format(x[0]+1, x[1]), ranking[:10])) + content += "

\n" + content += "
\n" + + # Top number of citations made + content += "
\n" + content += "

Most citations made from:
\n" + citation_tally = [(kv[0], len(kv[1])) for kv in cite_map.items()] + citation_count = defaultdict(list) + for title, count in citation_tally: citation_count[count].append(title) + content += "
\n".join(map( + lambda kv: "{0} – {1}".format( + kv[0], + "; ".join(sorted( + kv[1], + key=lambda t: utils.titlesort(t)))), + sorted(citation_count.items(), reverse=True)[:3])) + content += "

\n" + content += "
\n" + + # Top number of times cited + content += "
\n" + content += "

Most citations made to:
\n" + all_cited = set([title for cites in cite_map.values() for title in cites]) + cited_by_map = { + cited: [ + citer + for citer in cite_map.keys() + if cited in cite_map[citer]] + for cited in all_cited } + cited_tally = [(kv[0], len(kv[1])) for kv in cited_by_map.items()] + cited_count = defaultdict(list) + for title, count in cited_tally: cited_count[count].append(title) + content += "
\n".join(map( + lambda kv: "{0} – {1}".format(kv[0], "; ".join(sorted(kv[1]))), + sorted(cited_count.items(), reverse=True)[:3])) + content += "

\n" + content += "
\n" + + # Top article length, roughly by words + content += "
\n" + content += "

Longest article:
\n" + article_length = {} + for article in articles: + format_map = { + format_id: cite_tuple[0] + for format_id, cite_tuple in article.citations.items() + } + plain_content = article.content.format(**format_map) + words = len(plain_content.split()) + article_length[article.title] = words + content += "
\n".join(map( + lambda kv: "{0} – {1}".format(kv[1], kv[0]), + sorted(article_length.items(), reverse=True, key=lambda t: t[1])[:3])) + content += "

\n" + content += "
\n" + + # Player pageranks + content += "
\n" + content += "

Player total page rank:
\n" + players = sorted(set([article.player for article in articles if article.player is not None])) + articles_by = { + player : [ + a + for a in articles + if a.player == player] + for player in players} + player_rank = { + player : sum(map(lambda a: ranks[a.title] if a.title in ranks else 0, articles)) + for player, articles in articles_by.items()} + content += "
\n".join(map( + lambda kv: "{0} – {1}".format(kv[0], round(kv[1], 3)), + sorted(player_rank.items(), key=lambda t:t[1], reverse=True))) + content += "

\n" + content += "
\n" + + # Player citations made + content += "
\n" + content += "

Citations made by player
\n" + player_cite_count = { + player : sum(map(lambda a:len(a.wcites | a.pcites), articles)) + for player, articles in articles_by.items()} + content += "
\n".join(map( + lambda kv: "{0} – {1}".format(kv[0], kv[1]), + sorted(player_cite_count.items(), key=lambda t:t[1], reverse=True))) + content += "

\n" + content += "
\n" + + # Player cited count + content += "
\n" + content += "

Citations made to player
\n" + cited_times = {player : 0 for player in players} + for article in articles: + if article.player is not None: + cited_times[article.player] += len(article.citedby) + content += "
\n".join(map( + lambda kv: "{0} – {1}".format(kv[0], kv[1]), + sorted(cited_times.items(), key=lambda t:t[1], reverse=True))) + content += "

\n" + content += "
\n" + + # Fill in the entry skeleton + entry_skeleton = utils.load_resource("entry-page.html") + css = utils.load_resource("lexicon.css") + return entry_skeleton.format( + title="Statistics", + lexicon=config["LEXICON_TITLE"], + css=css, + logo=config["LOGO_FILENAME"], + prompt=config["PROMPT"], + sort=config["DEFAULT_SORT"], + content=content, + citeblock="") + +def build_graphviz_file(cite_map): + """ + Builds a citation graph in dot format for Graphviz. + """ + result = [] + result.append("digraph G {\n") + # Node labeling + written_entries = list(cite_map.keys()) + phantom_entries = set([title for cites in cite_map.values() for title in cites if title not in written_entries]) + node_labels = [title[:20] for title in written_entries + list(phantom_entries)] + node_names = [hash(i) for i in node_labels] + for i in range(len(node_labels)): + result.append("{} [label=\"{}\"];\n".format(node_names[i], node_labels[i])) + # Edges + for citer in written_entries: + for cited in cite_map[citer]: + result.append("{}->{};\n".format(hash(citer[:20]), hash(cited[:20]))) + # Return result + result.append("overlap=false;\n}\n") + return "".join(result)#"…" + +def build_compiled_page(articles, config): + """ + Builds a page compiling all articles in the Lexicon. + """ + # Sort by turn and title + turn_order = sorted( + articles, + key=lambda a: (a.turn, utils.titlesort(a.title))) + + # Build the content of each article + css = utils.load_resource("lexicon.css") + css += "\n"\ + "body { background: #ffffff; }\n"\ + "sup { vertical-align: top; font-size: 0.6em; }\n" + content = "\n"\ + "\n"\ + "{lexicon}\n"\ + "\n"\ + "\n"\ + "

{lexicon}

".format( + lexicon=config["LEXICON_TITLE"], + css=css) + for article in turn_order: + # Stitch in superscripts for citations + format_map = { + format_id: "{}{}".format(cite_tuple[0], format_id[1:]) + for format_id, cite_tuple in article.citations.items() + } + article_body = article.content.format(**format_map) + # Stitch a page-break-avoid div around the header and first paragraph + article_body = article_body.replace("

", "

", 1) + # Append the citation block + cite_list = "
\n".join( + "{}. {}\n".format(format_id[1:], cite_tuple[1]) + for format_id, cite_tuple in sorted( + article.citations.items(), + key=lambda t:int(t[0][1:]))) + cite_block = "" if article.player is None else ""\ + "

Citations:
\n"\ + "{}\n

".format(cite_list) + article_block = "
\n"\ + "

{}

\n"\ + "{}\n"\ + "{}\n".format(article.title, article_body, cite_block) + content += article_block + + content += "" + return content + +def build_all(path_prefix, lexicon_name): + """ + Builds all browsable articles and pages in the Lexicon. + """ + lex_path = os.path.join(path_prefix, lexicon_name) + # Load the Lexicon's peripherals + config = utils.load_config(lexicon_name) + entry_skeleton = utils.load_resource("entry-page.html") + css = utils.load_resource("lexicon.css") + # Parse the written articles + articles = LexiconArticle.parse_from_directory(os.path.join(lex_path, "src")) + # At this point, the articles haven't been cross-populated, + # so we can derive the written titles from this list + #written_titles = [article.title for article in articles] + # Once they've been populated, the articles list has the titles of all articles + # Sort this by turn before title so prev/next links run in turn order + articles = sorted( + LexiconArticle.populate(articles), + key=lambda a: (a.turn, utils.titlesort(a.title))) + #phantom_titles = [article.title for article in articles if article.title not in written_titles] + def pathto(*els): + return os.path.join(lex_path, *els) + + # Write the redirect page + print("Writing redirect page...") + with open(pathto("index.html"), "w", encoding="utf8") as f: + f.write(utils.load_resource("redirect.html").format(lexicon=config["LEXICON_TITLE"], sort=config["DEFAULT_SORT"])) + + # Write the article pages + print("Deleting old article pages...") + for filename in os.listdir(pathto("article")): + if filename[-5:] == ".html": + os.remove(pathto("article", filename)) + print("Writing article pages...") + l = len(articles) + for idx in range(l): + article = articles[idx] + with open(pathto("article", article.title_filesafe + ".html"), "w", encoding="utf-8") as f: + content = article.build_default_content() + citeblock = article.build_default_citeblock( + None if idx == 0 else articles[idx - 1], + None if idx == l-1 else articles[idx + 1]) + article_html = entry_skeleton.format( + title = article.title, + lexicon = config["LEXICON_TITLE"], + css = css, + logo = config["LOGO_FILENAME"], + prompt = config["PROMPT"], + sort = config["DEFAULT_SORT"], + content = content, + citeblock = citeblock) + f.write(article_html) + print(" Wrote " + article.title) + + # Write default pages + print("Writing default pages...") + with open(pathto("contents", "index.html"), "w", encoding="utf-8") as f: + f.write(build_contents_page(articles, config)) + print(" Wrote Contents") + with open(pathto("rules", "index.html"), "w", encoding="utf-8") as f: + f.write(build_rules_page(config)) + print(" Wrote Rules") + with open(pathto("formatting", "index.html"), "w", encoding="utf-8") as f: + f.write(build_formatting_page(config)) + print(" Wrote Formatting") + with open(pathto("session", "index.html"), "w", encoding="utf-8") as f: + f.write(build_session_page(config)) + print(" Wrote Session") + with open(pathto("statistics", "index.html"), "w", encoding="utf-8") as f: + f.write(build_statistics_page(articles, config)) + print(" Wrote Statistics") + + # Write auxiliary pages + if "PRINTABLE_FILE" in config and config["PRINTABLE_FILE"]: + with open(pathto(config["PRINTABLE_FILE"]), "w", encoding="utf-8") as f: + f.write(build_compiled_page(articles, config)) + print(" Wrote compiled page to " + config["PRINTABLE_FILE"]) + + # Check that authors aren't citing themselves + print("Running citation checks...") + article_by_title = {article.title : article for article in articles} + for 
article in articles: + for _, tup in article.citations.items(): + cited = article_by_title[tup[1]] + if article.player == cited.player: + print(" {2}: {0} cites {1}".format(article.title, cited.title, cited.player)) + + print() diff --git a/src/resources/contents.html b/src/resources/contents.html new file mode 100644 index 0000000..a7c2543 --- /dev/null +++ b/src/resources/contents.html @@ -0,0 +1,29 @@ + + diff --git a/resources/entry-page.html b/src/resources/entry-page.html similarity index 86% rename from resources/entry-page.html rename to src/resources/entry-page.html index 59619d3..cf65ece 100644 --- a/resources/entry-page.html +++ b/src/resources/entry-page.html @@ -2,6 +2,7 @@ {title} | {lexicon} + @@ -11,7 +12,7 @@

{lexicon}

-Contents — +ContentsRulesFormattingSession — @@ -23,4 +24,4 @@

{title}

{content}
{citeblock} - \ No newline at end of file + diff --git a/raw/example-page.txt b/src/resources/example-page.txt similarity index 91% rename from raw/example-page.txt rename to src/resources/example-page.txt index 092d315..cb7d04b 100644 --- a/raw/example-page.txt +++ b/src/resources/example-page.txt @@ -1,4 +1,4 @@ -# Author: Authorname +# Player: PN # Turn: 1 # Title: Example page @@ -12,4 +12,4 @@ Unlike the last paragraph, this line will be after a line break within the parag This is an [[example citation|Phantom page]]. You can also cite a [[phantom page]] with just the title. -~Dr. X. Amplepage \ No newline at end of file +~Dr. X. Amplepage diff --git a/resources/formatting.html b/src/resources/formatting.html similarity index 81% rename from resources/formatting.html rename to src/resources/formatting.html index 9521423..b2b3341 100644 --- a/resources/formatting.html +++ b/src/resources/formatting.html @@ -1,6 +1,6 @@

Lexipython provides support for a limited amount of Markdown-esque formatting.

-# Author: Authorname
+# Player: PN
 # Turn: 1
 # Title: Example page
 
@@ -16,8 +16,8 @@ This is an [[example citation|Phantom page]]. You can also cite a [[phantom page
 
 ~Dr. X. Amplepage
 
-

Each turn, fill out the header with your author information, the current turn, and the title of your entry. It doesn't really matter what the Author field is, except that it must be the same across all articles you write.

-

Two line breaks begins a new paragraph. A single line break does nothing, unless the line is neded by a double backslash (\\).

+

Each turn, fill out the header with your player information, the current turn, and the title of your entry. The Player field can be anything as long as it's the same for all articles you write (even when they're by different characters). Using your initials is recommended.

+

Two line breaks begin a new paragraph. A single line break does nothing, unless the line is ended by a double backslash (\\).

Text bounded by ** will be bolded: **bold** produces bold. Text bounded by // will be italicized: //italics// produces italics.
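A minimal sketch of how these rules are applied, mirroring the substitutions in LexiconArticle.from_file_raw in src/article.py: paragraphs are split on blank lines, a line ending in a double backslash gets a line break, and the // and ** markers become emphasis and bold. The function name, sample text, and exact output tags below are illustrative, not the builder's exact markup.

import re

def render_markup(source):
    # Sketch of Lexipython-style formatting, assuming <p>/<em>/<strong>/<br> output
    html = []
    # Two or more line breaks separate paragraphs
    for para in re.split(r"\n\n+", source.strip()):
        # A line ended by \\ becomes a line break within the paragraph
        para = re.sub(r"\\\\\n", "<br>\n", para)
        # //text// italicizes, **text** bolds
        para = re.sub(r"//([^/]+)//", r"<em>\1</em>", para)
        para = re.sub(r"\*\*([^*]+)\*\*", r"<strong>\1</strong>", para)
        html.append("<p>" + para + "</p>")
    return "\n".join(html)

print(render_markup("Some //italic// words.\\\\\nA forced break.\n\nA new paragraph with **bold** text."))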

To cite another Lexicon entry, use double brackets. Text in double brackets will cite and link to the entry of the same name: [[Example page]] produces Example page. Text in double brackets split with a | will alias the link as the left text and link to the entry with the name of the right text: [[this text|Example page]] produces this text. You must be precise in the entry title you cite to. Citations to "Example" vs. "The Example" will point to different entries and create different phantoms, and your GM will probably have to clean up after you.
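Citations are not turned into links at parse time. As the new src/article.py shows, from_file_raw replaces each double-bracket citation with a numbered format hook such as {c1} and records the alias and target title in the article's citations dict; the actual links are rendered later, once populate() has determined which targets are written and which are phantoms. A rough sketch of that extraction step, reusing the regex from the patch (the helper names and sample sentence are illustrative):

import re

CITE_RE = r"\[\[(([^|\[\]]+)\|)?([^|\[\]]+)\]\]"

def titlecase(s):
    # First-letter capitalization, as in src/utils.py
    s = s.strip()
    return s[:1].capitalize() + s[1:]

def extract_citations(para):
    # Replace [[alias|Target]] or [[Target]] with {cN} hooks and record them
    citations = {}
    format_id = 1
    match = re.search(CITE_RE, para)
    while match:
        alias = match.group(2) if match.group(2) else match.group(3)
        target = titlecase(re.sub(r"\s+", " ", match.group(3)))
        citations["c" + str(format_id)] = (alias, target)
        para = para[:match.start(0)] + "{c" + str(format_id) + "}" + para[match.end(0):]
        format_id += 1
        match = re.search(CITE_RE, para)
    return para, citations

text, cites = extract_citations(
    "This is an [[example citation|Phantom page]]. You can also cite a [[phantom page]].")
print(text)   # This is an {c1}. You can also cite a {c2}.
print(cites)  # {'c1': ('example citation', 'Phantom page'), 'c2': ('phantom page', 'Phantom page')}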

Beginning a paragraph with ~ will right-align it and place a horizontal line above it. Use this for signing your entry with your scholar's name.

diff --git a/lexicon.cfg b/src/resources/lexicon.cfg similarity index 73% rename from lexicon.cfg rename to src/resources/lexicon.cfg index 5b16ee7..7185b72 100644 --- a/lexicon.cfg +++ b/src/resources/lexicon.cfg @@ -43,3 +43,19 @@ PQRS TUV WXYZ <<>>DEFAULT_SORT>>> +?byturn +<<>>GRAPHVIZ_FILE>>> +<<>>PRINTABLE_FILE>>> +<< + +{lexicon} + + + +

Redirecting to {lexicon}...

+ + \ No newline at end of file diff --git a/resources/rules.html b/src/resources/rules.html similarity index 100% rename from resources/rules.html rename to src/resources/rules.html diff --git a/src/utils.py b/src/utils.py new file mode 100644 index 0000000..ea47974 --- /dev/null +++ b/src/utils.py @@ -0,0 +1,76 @@ +import os +import re +from urllib import parse + +# Short utility functions for handling titles + +def titlecase(s): + """ + Capitalizes the first word. + """ + s = s.strip() + return s[:1].capitalize() + s[1:] + +def titleescape(s): + """ + Makes an article title filename-safe. + """ + s = s.strip() + s = re.sub(r"\s+", '_', s) # Replace whitespace with _ + s = parse.quote(s) # Encode all other characters + s = re.sub(r"%", "", s) # Strip encoding %s + if len(s) > 64: # If the result is unreasonably long, + s = hex(abs(hash(s)))[2:] # Replace it with a hex hash + return s + +def titlesort(s): + """ + Reduces titles down for sorting. + """ + s = s.lower() + if s.startswith("the "): return s[4:] + if s.startswith("an "): return s[3:] + if s.startswith("a "): return s[2:] + return s + +# Load functions + +def load_resource(filename, cache={}): + """Loads files from the resources directory with caching.""" + if filename not in cache: + with open(os.path.join("src", "resources", filename), "r", encoding="utf-8") as f: + cache[filename] = f.read() + return cache[filename] + +def load_config(name): + """ + Loads values from a Lexicon's config file. + """ + config = {} + with open(os.path.join("lexicon", name, "lexicon.cfg"), "r", encoding="utf8") as f: + line = f.readline() + while line: + # Skim lines until a value definition begins + conf_match = re.match(">>>([^>]+)>>>\s+", line) + if not conf_match: + line = f.readline() + continue + # Accumulate the conf value until the value ends + conf = conf_match.group(1) + conf_value = "" + line = f.readline() + conf_match = re.match("<<<{0}<<<\s+".format(conf), line) + while line and not conf_match: + conf_value += line + line = f.readline() + conf_match = re.match("<<<{0}<<<\s+".format(conf), line) + if not line: + # TODO Not this + raise SystemExit("Reached EOF while reading config value {}".format(conf)) + config[conf] = conf_value.strip() + # Check that all necessary values were configured + for config_value in ['LEXICON_TITLE', 'PROMPT', 'SESSION_PAGE', "INDEX_LIST"]: + if config_value not in config: + # TODO Not this either + raise SystemExit("Error: {} not set in lexipython.cfg".format(config_value)) + return config
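A note on the configuration format read by utils.load_config above: each value in lexicon.cfg sits between >>>NAME>>> and <<<NAME<<< markers, and LEXICON_TITLE, PROMPT, SESSION_PAGE, and INDEX_LIST must all be present or the build aborts. The snippet below is a condensed, self-contained sketch of that block format; the sample keys and values are illustrative, the real parser is the line-based loop above, and the shipped defaults live in src/resources/lexicon.cfg.

import re

SAMPLE_CFG = """
>>>LEXICON_TITLE>>>
Lexicon Example
<<<LEXICON_TITLE<<<
>>>PROMPT>>>
An example prompt shown under the lexicon's title.
<<<PROMPT<<<
"""

def parse_blocks(text):
    # Condensed equivalent of the >>>KEY>>> ... <<<KEY<<< scan in load_config
    return {
        match.group(1): match.group(2).strip()
        for match in re.finditer(r">>>([^>]+)>>>\s+(.*?)\s+<<<\1<<<", text, re.DOTALL)
    }

config = parse_blocks(SAMPLE_CFG)
print(config["LEXICON_TITLE"])  # Lexicon Example

Within Lexipython itself, a game's settings are read with utils.load_config(name), which opens lexicon/<name>/lexicon.cfg and raises SystemExit if any of the required values is missing.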