From 00cc4e9cfeba2c975544086fac10c5b214bcea0d Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Tue, 30 Oct 2018 10:17:54 -0700 Subject: [PATCH 01/40] Copy CSS during lexicon init --- lexipython.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lexipython.py b/lexipython.py index c01864d..89ebfe6 100755 --- a/lexipython.py +++ b/lexipython.py @@ -138,6 +138,9 @@ def command_init(name): # Create the Lexicon's config file with open(os.path.join(lex_path, "lexicon.cfg"), "w") as config_file: config_file.write(config) + # Copy the CSS file + with open(os.path.join(lex_path, "lexicon.css"), "w") as css_file: + css_file.write(utils.load_resource("lexicon.css")) # Create an example page with open(os.path.join(lex_path, "src", "example-page.txt"), "w") as destfile: destfile.write(utils.load_resource("example-page.txt")) From 7490cd6f7fa289f44eb5245d6c7d554cc62fd28b Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Wed, 31 Oct 2018 15:22:15 -0700 Subject: [PATCH 02/40] Refactor citations and add addendum articles --- src/article.py | 230 +++++++++++++++++++++++++++++++++---------------- src/build.py | 110 +++++++++++------------ 2 files changed, 205 insertions(+), 135 deletions(-) diff --git a/src/article.py b/src/article.py index 9a0b09f..6df114d 100644 --- a/src/article.py +++ b/src/article.py @@ -3,21 +3,50 @@ import sys import re import src.utils as utils +class LexiconCitation: + """ + Represents information about a single citation in a Lexicon article. + + Members: + id int: citation id within the article, corresponding to a "{cN}" + format hook + text string: alias text linked to the citation target + target string: title of the article being cited + article LexiconArticle: article cited, None until interlink + """ + def __init__(self, id, citation_text, citation_target, article=None): + self.id = id + self.text = citation_text + self.target = citation_target + self.article = article + + def __repr__(self): + return "".format(self) + + def __str__(self): + return "<[{0.id}]:[[{0.text}|{0.target}]]>".format(self) + + def format(self, format_str): + return format_str.format(**self.__dict__) + class LexiconArticle: """ A Lexicon article and its metadata. - Members: - player string: the player of the article - turn integer: the turn the article was written for - title string: the article title - title_filesafe string: the title, escaped, used for filenames - content string: the HTML content, with citations replaced by format hooks - citations dict mapping format hook string to tuple of link alias and link target title - wcites list: titles of written articles cited - pcites list: titles of phantom articles cited - citedby list: titles of articles that cite this - The last three are filled in by populate(). 
+ Members defined by __init__: + player string: player who wrote the article + turn integer: turn the article was written for + title string: article title + title_filesafe string: title, escaped, used for filenames + content string: HTML content, with citations replaced by format hooks + citations list of LexiconCitations: citations made by the article + link_class string: CSS class to interpolate (for styling phantoms) + + Members undefined until interlink: + addendums list of LexiconArticles: addendum articles to this article + citedby set of LexiconArticles: articles that cite this article + prev_article LexiconArticle: the previous article in read order + next_article LexiconArticle: the next article in read order """ def __init__(self, player, turn, title, content, citations): @@ -30,9 +59,17 @@ class LexiconArticle: self.title_filesafe = utils.titleescape(title) self.content = content self.citations = citations - self.wcites = set() - self.pcites = set() + self.link_class = "class=\"phantom\"" if player is None else "" + self.addendums = [] self.citedby = set() + self.prev_article = None + self.next_article = None + + def __repr__(self): + return "".format(self) + + def __str__(self): + return "<\"{0.title}\", {0.player} turn {0.turn}>".format(self) @staticmethod def from_file_raw(raw_content): @@ -68,7 +105,7 @@ class LexiconArticle: # Parse the content and extract citations paras = re.split("\n\n+", content_raw.strip()) content = "" - citations = {} + citations = [] format_id = 1 if not paras: print("No content") @@ -91,7 +128,8 @@ class LexiconArticle: cite_text = link_match.group(2) if link_match.group(2) else link_match.group(3) cite_title = utils.titlecase(re.sub(r"\s+", " ", link_match.group(3))) # Record the citation - citations["c"+str(format_id)] = (cite_text, cite_title) + cite = LexiconCitation(format_id, cite_text, cite_title) + citations.append(cite) # Stitch the format id in place of the citation para = para[:link_match.start(0)] + "{c"+str(format_id)+"}" + para[link_match.end(0):] format_id += 1 # Increment to the next format citation @@ -129,83 +167,125 @@ class LexiconArticle: return articles @staticmethod - def populate(lexicon_articles): + def interlink(lexicon_articles): """ - Given a list of lexicon articles, fills out citation information - for each article and creates phantom pages for missing articles. + Fills out fields on articles that require other articles for context. + Creates phantom articles. """ - article_by_title = {article.title : article for article in lexicon_articles} - # Determine all articles that exist or should exist - extant_titles = set([citation[1] for article in lexicon_articles for citation in article.citations]) - # Interlink all citations - for article in lexicon_articles: - for cite_tuple in article.citations.values(): - target = cite_tuple[1] - # Create article objects for phantom citations - if target not in article_by_title: - article_by_title[target] = LexiconArticle(None, sys.maxsize, target, - "

<p><i>This entry hasn't been written yet.</i></p>
", {}) - # Interlink citations - if article_by_title[target].player is None: - article.pcites.add(target) - else: - article.wcites.add(target) - article_by_title[target].citedby.add(article.title) - return list(article_by_title.values()) + # Sort out which articles are addendums and which titles are phantoms + written_titles = set() + cited_titles = set() + article_by_title = {} + written_articles_ordered = sorted(lexicon_articles, key=lambda a: (a.turn, a.title)) + for written_article in written_articles_ordered: + # Track main articles by title + if written_article.title not in written_titles: + article_by_title[written_article.title] = written_article + written_titles.add(written_article.title) + # Append addendums to their parents + else: + parent = article_by_title[written_article.title] + parent.addendums.append(written_article) + # Collect all cited titles + for citation in written_article.citations: + cited_titles.add(citation.target) + # Create articles for each phantom title + for title in cited_titles - written_titles: + phantom_article = LexiconArticle( + None, sys.maxsize, title, + "

<p><i>This entry hasn't been written yet.</i></p>
", {}) + article_by_title[title] = phantom_article + # To interlink the articles, each citation needs to have its .article + # filled in, and that article needs its citedby updated. + for parent in article_by_title.values(): + under_title = [parent] + parent.addendums + for citing_article in under_title: + for citation in citing_article.citations: + target_article = article_by_title[citation.target] + citation.article = target_article + target_article.citedby.add(citing_article) + # Sort the articles by turn and title, then fill in prev/next fields + articles_ordered = sorted(article_by_title.values(), key=lambda a: (a.turn, utils.titlesort(a.title))) + for i in range(len(articles_ordered)): + articles_ordered[i].prev_article = articles_ordered[i-1] if i != 0 else None + articles_ordered[i].next_article = articles_ordered[i+1] if i != len(articles_ordered)-1 else None + return articles_ordered - def build_default_contentblock(self): + def build_default_content(self): """ - Formats citations into the article content as normal HTML links - and returns the result. + Builds the contents of the content div for an article page. + """ + content = "" + # Build the main article content block + main_body = self.build_default_article_body() + content += "

<div class=\"contentblock\"><h1>{}</h1>{}</div>
\n".format( + self.title, main_body) + # Build the main citation content block + main_citations = self.build_default_citeblock( + self.prev_article, self.next_article) + if main_citations: + content += "
<div class=\"contentblock citeblock\">{}</div>
\n".format( + main_citations) + # Build any addendum content blocks + for addendum in self.addendums: + add_body = addendum.build_default_article_body() + content += "
<div class=\"contentblock\">{}</div>
\n".format(add_body) + add_citations = addendum.build_default_citeblock(None, None) + if add_citations: + content += "
<div class=\"contentblock citeblock\">{}</div>
\n".format( + add_citations) + return content + + def build_default_article_body(self): + """ + Formats citations into the article text and returns the article body. """ format_map = { - format_id: "{0}".format( - cite_tuple[0], utils.titleescape(cite_tuple[1]), - "" if cite_tuple[1] in self.wcites else " class=\"phantom\"") - for format_id, cite_tuple in self.citations.items() + "c"+str(c.id) : c.format("{text}") + for c in self.citations } - article_content = self.content.format(**format_map) - return "
<div class=\"contentblock\">\n<h1>{}</h1>\n{}</div>
\n".format( - self.title, - article_content) + return self.content.format(**format_map) def build_default_citeblock(self, prev_article, next_article): """ - Builds the citeblock content HTML for use in regular article pages. - For each defined target, links the target page as Previous or Next. + Builds the contents of a citation contentblock. For each defined target, + links the target page as Previous or Next. Prev/next and cites/citedby + elements are not included if they have no content. """ - citeblock = "
\n" + content = "" # Prev/next links: if next_article is not None or prev_article is not None: - prev_link = ("← Previous".format( - prev_article.title_filesafe, - " class=\"phantom\"" if prev_article.player is None else "") + prev_link = ("← Previous".format( + prev_article) if prev_article is not None else "") - next_link = ("Next →".format( - next_article.title_filesafe, - " class=\"phantom\"" if next_article.player is None else "") + next_link = ("Next →".format( + next_article) if next_article is not None else "") - citeblock += "\n\n\n
{}{}
\n".format( + content += "\n\n\n
{}{}
\n".format( prev_link, next_link) # Citations - cites_links = [ - "{0}".format( - title, utils.titleescape(title), - "" if title in self.wcites else " class=\"phantom\"") - for title in sorted( - self.wcites | self.pcites, - key=lambda t: utils.titlesort(t))] + cites_titles = set() + cites_links = [] + for citation in sorted(self.citations, key=lambda c: (utils.titlesort(c.target), c.id)): + if citation.target not in cites_titles: + cites_titles.add(citation.target) + cites_links.append( + citation.format( + "{article.title}")) cites_str = " / ".join(cites_links) - if len(cites_str) < 1: cites_str = "—" - citeblock += "

<p>Citations: {}</p>
\n".format(cites_str) + if len(cites_str) > 0: + content += "

<p>Citations: {}</p>
\n".format(cites_str) # Citedby - citedby_links = [ - "{0}".format( - title, utils.titleescape(title)) - for title in sorted( - self.citedby, - key=lambda t: utils.titlesort(t))] + citedby_titles = set() + citedby_links = [] + for article in sorted(self.citedby, key=lambda a: (utils.titlesort(a.title), a.turn)): + if article.title not in citedby_titles: + citedby_titles.add(article.title) + citedby_links.append( + "{0.title}".format(article)) citedby_str = " / ".join(citedby_links) - if len(citedby_str) < 1: citedby_str = "—" - citeblock += "

<p>Cited by: {}</p>\n</div>
\n".format(citedby_str) - return citeblock + if len(citedby_str) > 0: + content += "

<p>Cited by: {}</p>
\n".format(citedby_str) + + return content diff --git a/src/build.py b/src/build.py index bdab5ca..40413e5 100644 --- a/src/build.py +++ b/src/build.py @@ -133,20 +133,13 @@ def build_statistics_page(page, articles): Builds the full HTML of the statistics page. """ content = "" - cite_map = { - article.title : [ - cite_tuple[1] - for cite_tuple - in article.citations.values() - ] - for article in articles} # Top pages by pagerank # Compute pagerank for each article G = networkx.Graph() - for citer, citeds in cite_map.items(): - for cited in citeds: - G.add_edge(citer, cited) + for article in articles: + for citation in article.citations: + G.add_edge(article.title, citation.target) rank_by_article = networkx.pagerank(G) # Get the top ten articles by pagerank top_pageranks = reverse_statistics_dict(rank_by_article)[:10] @@ -156,34 +149,25 @@ def build_statistics_page(page, articles): top_ranked_items = itemize(top_ranked) # Write the statistics to the page content += "
\n" - content += "Top 10 pages by page rank:
\n" + content += "Top 10 articles by page rank:
\n" content += "
\n".join(top_ranked_items) content += "
\n" # Top number of citations made - citations_made = { title : len(cites) for title, cites in cite_map.items() } + citations_made = {article.title : len(article.citations) for article in articles} top_citations = reverse_statistics_dict(citations_made)[:3] top_citations_items = itemize(top_citations) content += "
\n" - content += "Most citations made from:
\n" + content += "Top articles by citations made:
\n" content += "
\n".join(top_citations_items) content += "
\n" # Top number of times cited - # Build a map of what cites each article - all_cited = set([title for citeds in cite_map.values() for title in citeds]) - cited_by_map = { - cited: [ - citer - for citer in cite_map.keys() - if cited in cite_map[citer]] - for cited in all_cited } - # Compute the number of citations to each article - citations_to = { title : len(cites) for title, cites in cited_by_map.items() } + citations_to = {article.title : len(article.citedby) for article in articles} top_cited = reverse_statistics_dict(citations_to)[:3] top_cited_items = itemize(top_cited) content += "
\n" - content += "Most citations made to:
\n" + content += "Most cited articles:
\n" content += "
\n".join(top_cited_items) content += "
\n" @@ -191,16 +175,15 @@ def build_statistics_page(page, articles): article_length = {} for article in articles: format_map = { - format_id: cite_tuple[0] - for format_id, cite_tuple in article.citations.items() + "c"+str(c.id): c.text + for c in article.citations } plain_content = article.content.format(**format_map) - wordcount = len(plain_content.split()) - article_length[article.title] = wordcount + article_length[article.title] = len(plain_content.split()) top_length = reverse_statistics_dict(article_length)[:3] top_length_items = itemize(top_length) content += "
\n" - content += "Longest article:
\n" + content += "Longest articles:
\n" content += "
\n".join(top_length_items) content += "
\n" @@ -211,21 +194,23 @@ def build_statistics_page(page, articles): content += "\n" # Player pageranks + # Add addendums and recompute pagerank + for article in articles: + for addendum in article.addendums: + for citation in addendum.citations: + addendum_title = "{0.title}-T{0.turn}".format(addendum) + G.add_edge(addendum_title, citation.target) + rank_by_article = networkx.pagerank(G) players = sorted(set([article.player for article in articles if article.player is not None])) - articles_by_player = { - player : [ - a - for a in articles - if a.player == player] - for player in players} - pagerank_by_player = { - player : round( - sum(map( - lambda a: rank_by_article[a.title] if a.title in rank_by_article else 0, - articles)), - 3) - for player, articles - in articles_by_player.items()} + pagerank_by_player = {player: 0 for player in players} + for article in articles: + if article.player is not None: + pagerank_by_player[article.player] += rank_by_article[article.title] + for addendum in article.addendums: + addendum_title = "{0.title}-T{0.turn}".format(addendum) + pagerank_by_player[addendum_title] += rank_by_article[addendum_title] + for player in players: + pagerank_by_player[player] = round(pagerank_by_player[player], 3) player_rank = reverse_statistics_dict(pagerank_by_player) player_rank_items = itemize(player_rank) content += "
\n" @@ -234,13 +219,17 @@ def build_statistics_page(page, articles): content += "
\n" # Player citations made - player_cite_count = { - player : sum(map(lambda a:len(a.wcites | a.pcites), articles)) - for player, articles in articles_by_player.items()} - player_cites_made_ranks = reverse_statistics_dict(player_cite_count) + cite_count_by_player = {player: 0 for player in players} + for article in articles: + if article.player is not None: + unique_citations = set([a.target for a in article.citations]) + cite_count_by_player[article.player] += len(unique_citations) + for addendum in article.addendums: + cite_count_by_player[addendum.player] += len(addendum.citations) + player_cites_made_ranks = reverse_statistics_dict(cite_count_by_player) player_cites_made_items = itemize(player_cites_made_ranks) content += "
\n" - content += "Citations made by player
\n" + content += "Citations made by player:
\n" content += "
\n".join(player_cites_made_items) content += "
\n" @@ -252,7 +241,7 @@ def build_statistics_page(page, articles): cited_times_ranked = reverse_statistics_dict(cited_times) cited_times_items = itemize(cited_times_ranked) content += "
\n" - content += "Citations made to player
\n" + content += "Citations made to player:
\n" content += "
\n".join(cited_times_items) content += "
\n" @@ -350,7 +339,7 @@ def build_all(path_prefix, lexicon_name): # Once they've been populated, the articles list has the titles of all articles # Sort this by turn before title so prev/next links run in turn order articles = sorted( - LexiconArticle.populate(articles), + LexiconArticle.interlink(articles), key=lambda a: (a.turn, utils.titlesort(a.title))) def pathto(*els): @@ -372,13 +361,14 @@ def build_all(path_prefix, lexicon_name): for idx in range(l): article = articles[idx] with open(pathto("article", article.title_filesafe + ".html"), "w", encoding="utf-8") as f: - contentblock = article.build_default_contentblock() - citeblock = article.build_default_citeblock( - None if idx == 0 else articles[idx - 1], - None if idx == l-1 else articles[idx + 1]) + content = article.build_default_content() + #contentblock = article.build_default_contentblock() + #citeblock = article.build_default_citeblock( + # None if idx == 0 else articles[idx - 1], + # None if idx == l-1 else articles[idx + 1]) article_html = page.format( title = article.title, - content = contentblock + citeblock) + content = content) f.write(article_html) print(" Wrote " + article.title) @@ -409,10 +399,10 @@ def build_all(path_prefix, lexicon_name): # Check that authors aren't citing themselves print("Running citation checks...") article_by_title = {article.title : article for article in articles} - for article in articles: - for _, tup in article.citations.items(): - cited = article_by_title[tup[1]] - if article.player == cited.player: - print(" {2}: {0} cites {1}".format(article.title, cited.title, cited.player)) + #for article in articles: + # for _, tup in article.citations.items(): + # cited = article_by_title[tup[1]] + # if article.player == cited.player: + # print(" {2}: {0} cites {1}".format(article.title, cited.title, cited.player)) print() From 87df1b748087442ad0ecb9cf9296f6d749baa43f Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Fri, 2 Nov 2018 00:50:34 -0700 Subject: [PATCH 03/40] Fix pagerank crashing on citationless articles --- src/build.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/build.py b/src/build.py index 40413e5..02b6db9 100644 --- a/src/build.py +++ b/src/build.py @@ -205,10 +205,12 @@ def build_statistics_page(page, articles): pagerank_by_player = {player: 0 for player in players} for article in articles: if article.player is not None: - pagerank_by_player[article.player] += rank_by_article[article.title] + pagerank_by_player[article.player] += (rank_by_article[article.title] + if article.title in rank_by_article else 0) for addendum in article.addendums: addendum_title = "{0.title}-T{0.turn}".format(addendum) - pagerank_by_player[addendum_title] += rank_by_article[addendum_title] + pagerank_by_player[addendum_title] += (rank_by_article[addendum_title] + if addendum_title in rank_by_article else 0) for player in players: pagerank_by_player[player] = round(pagerank_by_player[player], 3) player_rank = reverse_statistics_dict(pagerank_by_player) From cd0e3d895bc4f596d06ed13cc6f688666d0fdff6 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sat, 3 Nov 2018 14:53:37 -0700 Subject: [PATCH 04/40] Fix citation check for new citation semantics --- src/build.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/build.py b/src/build.py index 02b6db9..ac5ccf5 100644 --- a/src/build.py +++ b/src/build.py @@ -400,11 +400,10 @@ def build_all(path_prefix, lexicon_name): # Check that authors aren't citing themselves print("Running citation 
checks...") - article_by_title = {article.title : article for article in articles} - #for article in articles: - # for _, tup in article.citations.items(): - # cited = article_by_title[tup[1]] - # if article.player == cited.player: - # print(" {2}: {0} cites {1}".format(article.title, cited.title, cited.player)) + for parent in articles: + for article in [parent] + parent.addendums: + for citation in article.citations: + if article.player == citation.article.player: + print(" {2}: {0} cites {1}".format(article.title, citation.target, article.player)) print() From 8260b014f34b37bdba1c46e114d93a501f41dd76 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sat, 3 Nov 2018 14:54:13 -0700 Subject: [PATCH 05/40] Add custom editor --- src/build.py | 18 ++++ src/resources/editor.html | 183 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 201 insertions(+) create mode 100644 src/resources/editor.html diff --git a/src/build.py b/src/build.py index ac5ccf5..c7198a0 100644 --- a/src/build.py +++ b/src/build.py @@ -398,6 +398,24 @@ def build_all(path_prefix, lexicon_name): f.write(build_compiled_page(articles, config)) print(" Wrote compiled page to " + config["PRINTABLE_FILE"]) + with open(pathto("editor.html"), "w", encoding="utf-8") as f: + editor = utils.load_resource("editor.html") + writtenArticles = "" + phantomArticles = "" + for article in articles: + if article.player is None: + phantomArticles += "{{title: \"{0}\"}},".format(article.title.replace("\"", "\\\"")) + else: + writtenArticles += "{{title: \"{0}\", author: \"{1.player}\"}},".format( + article.title.replace("\"", "\\\""), article) + nextTurn = 0 + if articles: + nextTurn = max([article.turn for article in articles if article.player is not None]) + 1 + editor = editor.replace("//writtenArticles", writtenArticles) + editor = editor.replace("//phantomArticles", phantomArticles) + editor = editor.replace("TURNNUMBER", str(nextTurn)) + f.write(editor) + # Check that authors aren't citing themselves print("Running citation checks...") for parent in articles: diff --git a/src/resources/editor.html b/src/resources/editor.html new file mode 100644 index 0000000..cdaaa55 --- /dev/null +++ b/src/resources/editor.html @@ -0,0 +1,183 @@ + + +Lexicon Editor + + + + +
+

Lexicon Editor

+
+ +
+
+ + + + + + + + + + +
# Player:
# Turn:
# Title:
+ +
+
+
+
+
+
+ + From ec953b6a9942d6312d4cf72f0fe810429d0c9502 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sat, 3 Nov 2018 19:07:52 -0700 Subject: [PATCH 06/40] Remove commented-out code --- src/build.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/build.py b/src/build.py index c7198a0..5f4520b 100644 --- a/src/build.py +++ b/src/build.py @@ -364,10 +364,6 @@ def build_all(path_prefix, lexicon_name): article = articles[idx] with open(pathto("article", article.title_filesafe + ".html"), "w", encoding="utf-8") as f: content = article.build_default_content() - #contentblock = article.build_default_contentblock() - #citeblock = article.build_default_citeblock( - # None if idx == 0 else articles[idx - 1], - # None if idx == l-1 else articles[idx + 1]) article_html = page.format( title = article.title, content = content) From 706b29d202b82be0e9e15f6523de51202b38b0fe Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sat, 3 Nov 2018 19:11:04 -0700 Subject: [PATCH 07/40] Add turn number to editor download for addendums --- src/resources/editor.html | 1 + 1 file changed, 1 insertion(+) diff --git a/src/resources/editor.html b/src/resources/editor.html index cdaaa55..3a13bd5 100644 --- a/src/resources/editor.html +++ b/src/resources/editor.html @@ -132,6 +132,7 @@ "\n" + articleBody; var articleFilename = articleTitle.toLowerCase().replace(/[^a-z0-9- ]/g, "").replace(/ +/g, "-"); + articleFilename += "-" + articleTurn.toString(); var downloader = document.createElement("a"); downloader.setAttribute("href", "data:text/plain;charset=utf-8," + encodeURIComponent(articleText)); downloader.setAttribute("download", articleFilename); From a133f2c86556e5f69974c38ec28349447b33facc Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sat, 3 Nov 2018 21:00:39 -0700 Subject: [PATCH 08/40] Add by-turn word count --- src/build.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/build.py b/src/build.py index 5f4520b..ca41c0b 100644 --- a/src/build.py +++ b/src/build.py @@ -188,9 +188,20 @@ def build_statistics_page(page, articles): content += "\n" # Total word count + all_articles = [] + for article in articles: + all_articles.append(article) + all_articles.extend(article.addendums) + turn_numbers = set([a.turn for a in articles if a.player is not None]) + aggregate = {num: 0 for num in turn_numbers} + for turn_num in turn_numbers: + for article in all_articles: + if article.turn <= turn_num: + aggregate[turn_num] += article_length[article.title] + aggr_list = [(str(k), [str(v)]) for k,v in aggregate.items()] content += "
\n" - content += "Total word count:
\n" - content += str(sum(article_length.values())) + content += "Aggregate word count by turn:
\n" + content += "
\n".join(itemize(aggr_list)) content += "
\n" # Player pageranks From 56421820bb976fe85735730b3ba2e32b86b07d6a Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sun, 4 Nov 2018 00:02:48 -0700 Subject: [PATCH 09/40] Add pagerank bottom statistic --- src/build.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/src/build.py b/src/build.py index ca41c0b..39e535a 100644 --- a/src/build.py +++ b/src/build.py @@ -211,17 +211,17 @@ def build_statistics_page(page, articles): for citation in addendum.citations: addendum_title = "{0.title}-T{0.turn}".format(addendum) G.add_edge(addendum_title, citation.target) - rank_by_article = networkx.pagerank(G) + rank_by_article_all = networkx.pagerank(G) players = sorted(set([article.player for article in articles if article.player is not None])) pagerank_by_player = {player: 0 for player in players} for article in articles: if article.player is not None: - pagerank_by_player[article.player] += (rank_by_article[article.title] - if article.title in rank_by_article else 0) + pagerank_by_player[article.player] += (rank_by_article_all[article.title] + if article.title in rank_by_article_all else 0) for addendum in article.addendums: addendum_title = "{0.title}-T{0.turn}".format(addendum) - pagerank_by_player[addendum_title] += (rank_by_article[addendum_title] - if addendum_title in rank_by_article else 0) + pagerank_by_player[addendum_title] += (rank_by_article_all[addendum_title] + if addendum_title in rank_by_article_all else 0) for player in players: pagerank_by_player[player] = round(pagerank_by_player[player], 3) player_rank = reverse_statistics_dict(pagerank_by_player) @@ -258,6 +258,17 @@ def build_statistics_page(page, articles): content += "
\n".join(cited_times_items) content += "\n" + # Lowest pagerank + pageranks = reverse_statistics_dict(rank_by_article) + bot_ranked = list(enumerate(map(lambda x: x[1], pageranks), start=1))[-10:] + # Format the ranks into strings + bot_ranked_items = itemize(bot_ranked) + content += "
\n" + content += "Bottom 10 articles by pagerank:
\n" + content += "
\n".join(bot_ranked_items) + content += "
\n" + + # Fill in the entry skeleton return page.format(title="Statistics", content=content) From e6293eab5054820f6518a06b6652067afb5dee98 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sun, 4 Nov 2018 00:07:43 -0700 Subject: [PATCH 10/40] Filter out phantoms from bottom pagerank --- src/build.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/build.py b/src/build.py index 39e535a..1fe8a6e 100644 --- a/src/build.py +++ b/src/build.py @@ -258,7 +258,13 @@ def build_statistics_page(page, articles): content += "
\n".join(cited_times_items) content += "\n" - # Lowest pagerank + # Lowest pagerank of written articles + G = networkx.Graph() + for article in articles: + for citation in article.citations: + if citation.article.player is not None: + G.add_edge(article.title, citation.target) + rank_by_article = networkx.pagerank(G) pageranks = reverse_statistics_dict(rank_by_article) bot_ranked = list(enumerate(map(lambda x: x[1], pageranks), start=1))[-10:] # Format the ranks into strings @@ -268,7 +274,6 @@ def build_statistics_page(page, articles): content += "
\n".join(bot_ranked_items) content += "\n" - # Fill in the entry skeleton return page.format(title="Statistics", content=content) From c125bdbe69ae55b9a95e5e99f563277c23574618 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sun, 4 Nov 2018 00:58:07 -0700 Subject: [PATCH 11/40] Fix error in player pagerank statistics --- src/build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/build.py b/src/build.py index 1fe8a6e..7e74214 100644 --- a/src/build.py +++ b/src/build.py @@ -220,7 +220,7 @@ def build_statistics_page(page, articles): if article.title in rank_by_article_all else 0) for addendum in article.addendums: addendum_title = "{0.title}-T{0.turn}".format(addendum) - pagerank_by_player[addendum_title] += (rank_by_article_all[addendum_title] + pagerank_by_player[addendum.player] += (rank_by_article_all[addendum_title] if addendum_title in rank_by_article_all else 0) for player in players: pagerank_by_player[player] = round(pagerank_by_player[player], 3) From 6f51bde2cc6ec03a928e76b88cd69a570b94ac7d Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Mon, 5 Nov 2018 09:36:59 -0800 Subject: [PATCH 12/40] Fix phantom exclusion in bottom pageranks --- src/build.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/src/build.py b/src/build.py index 7e74214..762358b 100644 --- a/src/build.py +++ b/src/build.py @@ -259,13 +259,9 @@ def build_statistics_page(page, articles): content += "\n" # Lowest pagerank of written articles - G = networkx.Graph() - for article in articles: - for citation in article.citations: - if citation.article.player is not None: - G.add_edge(article.title, citation.target) - rank_by_article = networkx.pagerank(G) - pageranks = reverse_statistics_dict(rank_by_article) + exclude = [a.title for a in articles if a.player is None] + rank_by_written_only = {k:v for k,v in rank_by_article.items() if k in exclude} + pageranks = reverse_statistics_dict(rank_by_written_only) bot_ranked = list(enumerate(map(lambda x: x[1], pageranks), start=1))[-10:] # Format the ranks into strings bot_ranked_items = itemize(bot_ranked) From f9c1f02b37727180dd3744390fdcf8eb2427a06d Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Wed, 7 Nov 2018 00:11:37 -0800 Subject: [PATCH 13/40] Add missing not --- src/build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/build.py b/src/build.py index 762358b..1ea357a 100644 --- a/src/build.py +++ b/src/build.py @@ -260,7 +260,7 @@ def build_statistics_page(page, articles): # Lowest pagerank of written articles exclude = [a.title for a in articles if a.player is None] - rank_by_written_only = {k:v for k,v in rank_by_article.items() if k in exclude} + rank_by_written_only = {k:v for k,v in rank_by_article.items() if k not in exclude} pageranks = reverse_statistics_dict(rank_by_written_only) bot_ranked = list(enumerate(map(lambda x: x[1], pageranks), start=1))[-10:] # Format the ranks into strings From 4ab9f1f1ea182662da458d4372fef43efa479d61 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Wed, 7 Nov 2018 23:06:45 -0800 Subject: [PATCH 14/40] Lower margins on index headings --- src/resources/lexicon.css | 1 + 1 file changed, 1 insertion(+) diff --git a/src/resources/lexicon.css b/src/resources/lexicon.css index dee13b1..bb70d3b 100644 --- a/src/resources/lexicon.css +++ b/src/resources/lexicon.css @@ -21,6 +21,7 @@ div#content { position: absolute; right: 0px; left: 226px; max-width: 564px; margin: 5px; } div.contentblock { background-color: #ffffff; box-shadow: 2px 2px 10px #888888; 
margin-bottom: 5px; padding: 10px; width: auto; border-radius: 5px; } +div.contentblock h3 { margin: 0.3em 0; } a.phantom { color: #cc2200; } div.citeblock a.phantom { font-style: italic; } span.signature { text-align: right; } From 9a8ad419a04038be8aac1c9b741fd8c75abb8d94 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sat, 10 Nov 2018 15:39:11 -0800 Subject: [PATCH 15/40] Rework printable page for new article model --- src/build.py | 83 ++++++++++++++++++++++++++++------------------------ 1 file changed, 44 insertions(+), 39 deletions(-) diff --git a/src/build.py b/src/build.py index 1ea357a..c2f1e22 100644 --- a/src/build.py +++ b/src/build.py @@ -298,49 +298,54 @@ def build_compiled_page(articles, config): """ Builds a page compiling all articles in the Lexicon. """ - # Sort by turn and title - turn_order = sorted( + articles = sorted( articles, - key=lambda a: (a.turn, utils.titlesort(a.title))) + key=lambda a: (utils.titlesort(a.title))) - # Build the content of each article - css = utils.load_resource("lexicon.css") - css += "\n"\ - "body { background: #ffffff; }\n"\ - "sup { vertical-align: top; font-size: 0.6em; }\n" - content = "\n"\ - "\n"\ - "{lexicon}\n"\ - "\n"\ - "\n"\ - "

{lexicon}

".format( - lexicon=config["LEXICON_TITLE"], - css=css) - for article in turn_order: - # Stitch in superscripts for citations + # Write the header + content = "{}"\ + ""\ + "\n".format(config["LEXICON_TITLE"]) + + # Write each article + for article in articles: + # Article title + content += "

{0.title}

".format(article) + + # Article content format_map = { - format_id: "{}{}".format(cite_tuple[0], format_id[1:]) - for format_id, cite_tuple in article.citations.items() + "c"+str(c.id) : c.format("{text}{id}") + for c in article.citations } - article_body = article.content.format(**format_map) - # Stitch a page-break-avoid div around the header and first paragraph - article_body = article_body.replace("

", "

", 1) - # Append the citation block - cite_list = "
\n".join( - "{}. {}\n".format(format_id[1:], cite_tuple[1]) - for format_id, cite_tuple in sorted( - article.citations.items(), - key=lambda t:int(t[0][1:]))) - cite_block = "" if article.player is None else ""\ - "

Citations:
\n"\ - "{}\n

".format(cite_list) - article_block = "
\n"\ - "

{}

\n"\ - "{}\n"\ - "{}\n".format(article.title, article_body, cite_block) - content += article_block + article_content = article.content.format(**format_map) + article_content = article_content.replace("

", "

", 1) + content += article_content + + # Article citations + cite_list = "
".join( + c.format("{id}. {target}") + for c in article.citations) + cite_block = "

{}

".format(cite_list) + content += cite_block + + # Addendums + for addendum in article.addendums: + # Addendum content + format_map = { + "c"+str(c.id) : c.format("{text}{id}") + for c in addendum.citations + } + article_content = addendum.content.format(**format_map) + content += article_content + + # Addendum citations + cite_list = "
".join( + c.format("{id}. {target}") + for c in addendum.citations) + cite_block = "

{}

".format(cite_list) + content += cite_block content += "" return content From aeb195e59581c8c2facf00dcfa126cdb6431b280 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Tue, 13 Nov 2018 13:03:55 -0800 Subject: [PATCH 16/40] Add additional citation checks to editor --- src/build.py | 5 ++++- src/resources/editor.html | 6 +++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/build.py b/src/build.py index c2f1e22..8408c21 100644 --- a/src/build.py +++ b/src/build.py @@ -427,8 +427,11 @@ def build_all(path_prefix, lexicon_name): writtenArticles = "" phantomArticles = "" for article in articles: + citedby = {'"' + citer.player + '"' for citer in article.citedby} if article.player is None: - phantomArticles += "{{title: \"{0}\"}},".format(article.title.replace("\"", "\\\"")) + phantomArticles += "{{title: \"{0}\", citedby: [{1}]}},".format( + article.title.replace("\"", "\\\""), + ",".join(sorted(citedby))) else: writtenArticles += "{{title: \"{0}\", author: \"{1.player}\"}},".format( article.title.replace("\"", "\\\""), article) diff --git a/src/resources/editor.html b/src/resources/editor.html index 3a13bd5..a5a62da 100644 --- a/src/resources/editor.html +++ b/src/resources/editor.html @@ -20,9 +20,13 @@ ] function updatePreview() { + var articlePlayer = document.getElementById("article-player").value; var articleTitle = document.getElementById("article-title").value; var articleBody = document.getElementById("article-body").value; var previewHtml = "

" + articleTitle + "

\n"; + if (phantomArticles.some(e => (e.title === articleTitle && e.citedby.some(p => (p === articlePlayer))))) { + previewHtml += "

<p><i>You've cited this article!</i></p>
" + } previewHtml += parseLexipythonMarkdown(articleBody); document.getElementById("preview").innerHTML = previewHtml; } @@ -164,7 +168,7 @@
- + From 7070d460fcd5c6570cb9579c8c50b89c75c0fce3 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Tue, 13 Nov 2018 13:04:13 -0800 Subject: [PATCH 17/40] Optimize lambdas --- src/resources/editor.html | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/src/resources/editor.html b/src/resources/editor.html index a5a62da..9e3e7c8 100644 --- a/src/resources/editor.html +++ b/src/resources/editor.html @@ -94,23 +94,14 @@ content += "

The following articles will be cited:

\n"; for (var i = 0; i < citationList.length; i++) { citation = citationList[i][0].toString() + ". " + citationList[i][1]; - if (writtenArticles.some( - function (e) { - return (e.title === citationList[i][1]) && (e.author === player); - })) { - content += "

" + citation + " [Written by you!]

"; - } else if (writtenArticles.some( - function (e) { - return (e.title === citationList[i][1]); - })) { - content += "

" + citation + " [Written]"; - } else if (phantomArticles.some( - function (e) { - return (e.title === citationList[i][1]); - })) { - content += "

" + citation + " [Phantom]"; + if (writtenArticles.some(e => (e.title === citationList[i][1]) && (e.author === player))) { + content += "

" + citation + " [Written by you!]

"; + } else if (writtenArticles.some(e => (e.title === citationList[i][1]))) { + content += "

" + citation + " [Written]"; + } else if (phantomArticles.some(e => (e.title === citationList[i][1]))) { + content += "

" + citation + " [Phantom]"; } else { - content += "

" + citation + " [New]"; + content += "

" + citation + " [New]"; } content += "

\n"; } From b15fcbe359759b66005903dd74cf1ee64ad9d1ab Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Wed, 21 Nov 2018 11:42:39 -0800 Subject: [PATCH 18/40] Add undercited articles statistic --- src/build.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/build.py b/src/build.py index 8408c21..758ef38 100644 --- a/src/build.py +++ b/src/build.py @@ -270,6 +270,14 @@ def build_statistics_page(page, articles): content += "
\n".join(bot_ranked_items) content += "\n" + # Undercited articles + undercited = {a.title: len(a.citedby) for a in articles if len(a.citedby) <= 1} + undercited_items = itemize(reverse_statistics_dict(undercited)) + content += "
\n" + content += "Undercited articles:
\n" + content += "
\n".join(undercited_items) + content += "
\n" + # Fill in the entry skeleton return page.format(title="Statistics", content=content) From c9281b6450334c0d26b54fcf48cdd2cbae069864 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Thu, 29 Nov 2018 00:27:54 -0800 Subject: [PATCH 19/40] Fix turn count miscounting addendum lengths --- src/build.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/build.py b/src/build.py index 758ef38..a00979b 100644 --- a/src/build.py +++ b/src/build.py @@ -197,7 +197,12 @@ def build_statistics_page(page, articles): for turn_num in turn_numbers: for article in all_articles: if article.turn <= turn_num: - aggregate[turn_num] += article_length[article.title] + format_map = { + "c"+str(c.id): c.text + for c in article.citations + } + plain_content = article.content.format(**format_map) + aggregate[turn_num] += len(plain_content.split()) aggr_list = [(str(k), [str(v)]) for k,v in aggregate.items()] content += "
\n" content += "Aggregate word count by turn:
\n" From c50676c37de84f4729f4e941923cfe5a3c5786d2 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Fri, 7 Dec 2018 08:58:41 -0800 Subject: [PATCH 20/40] Rewrite statistics calculations again --- src/build.py | 195 +++++++++++++++++++++++++++------------------------ 1 file changed, 104 insertions(+), 91 deletions(-) diff --git a/src/build.py b/src/build.py index a00979b..4530322 100644 --- a/src/build.py +++ b/src/build.py @@ -131,120 +131,129 @@ def itemize(stats_list): def build_statistics_page(page, articles): """ Builds the full HTML of the statistics page. + + The existence of addendum articles complicates how some statistics are + computed. An addendum is an article, with its own author, body, and + citations, but in a Lexicon it exists appended to another article. To handle + this, we distinguish an _article_ from a _page_. An article is a unit parsed + from a single source file. A page is a main article and all addendums under + the same title. """ + min_turn = 0 + max_turn = 0 + article_by_title = {} + page_by_title = {} + players = set() + for main_article in articles: + key = main_article.title + page_by_title[key] = [main_article] + page_by_title[key].extend(main_article.addendums) + for article in [main_article] + main_article.addendums: + # Disambiguate articles by appending turn number to the title + key = "{0.title} (T{0.turn})".format(article) + article_by_title[key] = article + if article.player is not None: + min_turn = min(min_turn, article.turn) + max_turn = max(max_turn, article.turn) + players.add(article.player) content = "" + stat_block = "
<div class=\"contentblock\">{0}<br>{1}</div>
\n" # Top pages by pagerank - # Compute pagerank for each article + # Compute pagerank for each page, including all articles G = networkx.Graph() - for article in articles: - for citation in article.citations: - G.add_edge(article.title, citation.target) - rank_by_article = networkx.pagerank(G) + for page_title, articles in page_by_title.items(): + for article in articles: + for citation in article.citations: + G.add_edge(page_title, citation.target) + pagerank_by_title = networkx.pagerank(G) + for page_title, articles in page_by_title.items(): + if page_title not in pagerank_by_title: + pagerank_by_title[page_title] = 0 # Get the top ten articles by pagerank - top_pageranks = reverse_statistics_dict(rank_by_article)[:10] + top_pageranks = reverse_statistics_dict(pagerank_by_title)[:10] # Replace the pageranks with ordinals top_ranked = enumerate(map(lambda x: x[1], top_pageranks), start=1) # Format the ranks into strings top_ranked_items = itemize(top_ranked) # Write the statistics to the page - content += "
\n" - content += "Top 10 articles by page rank:
\n" - content += "
\n".join(top_ranked_items) - content += "
\n" + content += stat_block.format( + "Top 10 articles by page rank:", + "
".join(top_ranked_items)) - # Top number of citations made - citations_made = {article.title : len(article.citations) for article in articles} - top_citations = reverse_statistics_dict(citations_made)[:3] + # Pages cited/cited by + pages_cited = {page_title: set() for page_title in page_by_title.keys()} + pages_cited_by = {page_title: set() for page_title in page_by_title.keys()} + for page_title, articles in page_by_title.items(): + for article in articles: + for citation in article.citations: + pages_cited[page_title].add(citation.target) + pages_cited_by[citation.target].add(page_title) + for page_title, cite_titles in pages_cited.items(): + pages_cited[page_title] = len(cite_titles) + for page_title, cite_titles in pages_cited_by.items(): + pages_cited_by[page_title] = len(cite_titles) + + top_citations = reverse_statistics_dict(pages_cited)[:3] top_citations_items = itemize(top_citations) - content += "
\n" - content += "Top articles by citations made:
\n" - content += "
\n".join(top_citations_items) - content += "
\n" - - # Top number of times cited - citations_to = {article.title : len(article.citedby) for article in articles} - top_cited = reverse_statistics_dict(citations_to)[:3] + content += stat_block.format( + "Cited the most pages:", + "
".join(top_citations_items)) + top_cited = reverse_statistics_dict(pages_cited_by)[:3] top_cited_items = itemize(top_cited) - content += "
\n" - content += "Most cited articles:
\n" - content += "
\n".join(top_cited_items) - content += "
\n" + content += stat_block.format( + "Cited by the most pages:", + "
".join(top_cited_items)) - # Top article length, roughly by words - article_length = {} - for article in articles: + # Top article length + article_length_by_title = {} + cumulative_article_length_by_turn = { + turn_num: 0 + for turn_num in range(min_turn, max_turn + 1) + } + for article_title, article in article_by_title.items(): format_map = { "c"+str(c.id): c.text for c in article.citations } plain_content = article.content.format(**format_map) - article_length[article.title] = len(plain_content.split()) - top_length = reverse_statistics_dict(article_length)[:3] + word_count = len(plain_content.split()) + article_length_by_title[article_title] = word_count + for turn_num in range(min_turn, max_turn + 1): + if article.turn <= turn_num: + cumulative_article_length_by_turn[turn_num] += word_count + top_length = reverse_statistics_dict(article_length_by_title)[:3] top_length_items = itemize(top_length) - content += "
\n" - content += "Longest articles:
\n" - content += "
\n".join(top_length_items) - content += "
\n" + content += stat_block.format( + "Longest articles:", + "
".join(top_length_items)) # Total word count - all_articles = [] - for article in articles: - all_articles.append(article) - all_articles.extend(article.addendums) - turn_numbers = set([a.turn for a in articles if a.player is not None]) - aggregate = {num: 0 for num in turn_numbers} - for turn_num in turn_numbers: - for article in all_articles: - if article.turn <= turn_num: - format_map = { - "c"+str(c.id): c.text - for c in article.citations - } - plain_content = article.content.format(**format_map) - aggregate[turn_num] += len(plain_content.split()) - aggr_list = [(str(k), [str(v)]) for k,v in aggregate.items()] - content += "
\n" - content += "Aggregate word count by turn:
\n" - content += "
\n".join(itemize(aggr_list)) - content += "
\n" + len_list = [(str(k), [str(v)]) for k,v in cumulative_article_length_by_turn.items()] + content += stat_block.format( + "Aggregate word count by turn:", + "
".join(itemize(len_list))) # Player pageranks - # Add addendums and recompute pagerank - for article in articles: - for addendum in article.addendums: - for citation in addendum.citations: - addendum_title = "{0.title}-T{0.turn}".format(addendum) - G.add_edge(addendum_title, citation.target) - rank_by_article_all = networkx.pagerank(G) - players = sorted(set([article.player for article in articles if article.player is not None])) pagerank_by_player = {player: 0 for player in players} - for article in articles: - if article.player is not None: - pagerank_by_player[article.player] += (rank_by_article_all[article.title] - if article.title in rank_by_article_all else 0) - for addendum in article.addendums: - addendum_title = "{0.title}-T{0.turn}".format(addendum) - pagerank_by_player[addendum.player] += (rank_by_article_all[addendum_title] - if addendum_title in rank_by_article_all else 0) - for player in players: - pagerank_by_player[player] = round(pagerank_by_player[player], 3) + for page_title, articles in page_by_title.items(): + page_author = articles[0].player + if page_author is not None: + pagerank_by_player[page_author] += pagerank_by_title[page_title] + for player, pagerank in pagerank_by_player.items(): + pagerank_by_player[player] = round(pagerank, 3) player_rank = reverse_statistics_dict(pagerank_by_player) player_rank_items = itemize(player_rank) - content += "
\n" - content += "Player total page rank:
\n" - content += "
\n".join(player_rank_items) - content += "
\n" + content += stat_block.format( + "Player aggregate page rank:", + "
".join(player_rank_items)) # Player citations made - cite_count_by_player = {player: 0 for player in players} - for article in articles: + pages_cited_by_player = {player: 0 for player in players} + for article_title, article in article_by_title.items(): if article.player is not None: - unique_citations = set([a.target for a in article.citations]) - cite_count_by_player[article.player] += len(unique_citations) - for addendum in article.addendums: - cite_count_by_player[addendum.player] += len(addendum.citations) - player_cites_made_ranks = reverse_statistics_dict(cite_count_by_player) + pages_cited_by_player[article.player] += len(article.citations) + player_cites_made_ranks = reverse_statistics_dict(pages_cited_by_player) player_cites_made_items = itemize(player_cites_made_ranks) content += "
\n" content += "Citations made by player:
\n" @@ -252,20 +261,21 @@ def build_statistics_page(page, articles): content += "
\n" # Player cited count - cited_times = {player : 0 for player in players} - for article in articles: - if article.player is not None: - cited_times[article.player] += len(article.citedby) - cited_times_ranked = reverse_statistics_dict(cited_times) + pages_cited_by_by_player = {player: 0 for player in players} + for page_title, articles in page_by_title.items(): + page_author = articles[0].player + if page_author is not None: + pages_cited_by_by_player[page_author] += len(articles[0].citedby) + cited_times_ranked = reverse_statistics_dict(pages_cited_by_by_player) cited_times_items = itemize(cited_times_ranked) content += "
\n" - content += "Citations made to player:
\n" + content += "Citations made to article by player:
\n" content += "
\n".join(cited_times_items) content += "
\n" # Lowest pagerank of written articles exclude = [a.title for a in articles if a.player is None] - rank_by_written_only = {k:v for k,v in rank_by_article.items() if k not in exclude} + rank_by_written_only = {k:v for k,v in pagerank_by_title.items() if k not in exclude} pageranks = reverse_statistics_dict(rank_by_written_only) bot_ranked = list(enumerate(map(lambda x: x[1], pageranks), start=1))[-10:] # Format the ranks into strings @@ -276,7 +286,10 @@ def build_statistics_page(page, articles): content += "
\n" # Undercited articles - undercited = {a.title: len(a.citedby) for a in articles if len(a.citedby) <= 1} + undercited = { + page_title: len(articles[0].citedby) + for page_title, articles in page_by_title.items() + if len(articles[0].citedby) < 2} undercited_items = itemize(reverse_statistics_dict(undercited)) content += "
\n" content += "Undercited articles:
\n" From 312af310e0b4d58671a8ad4e7975a427d8014b1b Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sun, 10 Mar 2019 17:36:24 -0700 Subject: [PATCH 21/40] Update gitignore --- .gitignore | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index d1f8edc..73ac0e3 100644 --- a/.gitignore +++ b/.gitignore @@ -101,4 +101,10 @@ ENV/ .mypy_cache/ # Ignore directories in lexicon/ -lexicon/*/ \ No newline at end of file +lexicon/*/ + +# Ignore vscode +.vscode + +# Ignore a scratch directory +scratch/ \ No newline at end of file From f709efefb8c3f7c7a738d01d884fcb2b5f486860 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Mon, 22 Apr 2019 12:52:29 -0700 Subject: [PATCH 22/40] Add WIP latex export code --- src/build.py | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) diff --git a/src/build.py b/src/build.py index 4530322..9d155e2 100644 --- a/src/build.py +++ b/src/build.py @@ -376,6 +376,107 @@ def build_compiled_page(articles, config): content += "" return content +def latex_from_markdown(raw_content): + content = "" + headers = raw_content.split('\n', 3) + player_header, turn_header, title_header, content_raw = headers + if not turn_header.startswith("# Turn:"): + print("Turn header missing or corrupted") + return None + turn = int(turn_header[7:].strip()) + if not title_header.startswith("# Title:"): + print("Title header missing or corrupted") + return None + title = utils.titlecase(title_header[8:]) + #content += "\\label{{{}}}\n".format(title) + #content += "\\section*{{{}}}\n\n".format(title) + # Parse content + paras = re.split("\n\n+", content_raw.strip()) + for para in paras: + # Escape things + para = re.sub("—", "---", para) + para = re.sub("&", "\\&", para) + para = re.sub(r"\"(?=\w)", "``", para) + para = re.sub(r"(?<=\w)\"", "''", para) + # Replace bold and italic marks with commands + para = re.sub(r"//([^/]+)//", r"\\textit{\1}", para) + para = re.sub(r"\*\*([^*]+)\*\*", r"\\textbf{\1}", para) + # Footnotify citations + link_match = re.search(r"\[\[(([^|\[\]]+)\|)?([^|\[\]]+)\]\]", para) + while link_match: + # Identify the citation text and cited article + cite_text = link_match.group(2) if link_match.group(2) else link_match.group(3) + cite_title = utils.titlecase(re.sub(r"\s+", " ", link_match.group(3))) + # Stitch the title into a footnote + para = (para[:link_match.start(0)] + cite_text + "\\footnote{" + + cite_title + + ", p. 
\\pageref{" + str(hash(cite_title)) + "}" + + "}" + para[link_match.end(0):]) + link_match = re.search(r"\[\[(([^|\[\]]+)\|)?([^|\[\]]+)\]\]", para) + # Convert signature to right-aligned + if para[:1] == '~': + para = "\\begin{flushright}\n" + para[1:] + "\n\\end{flushright}\n\n" + else: + para = para + "\n\n" + content += para + return title, turn, content + +def latex_from_directory(directory): + articles = {} + for filename in os.listdir(directory): + path = os.path.join(directory, filename) + # Read only .txt files + if filename[-4:] == ".txt": + with open(path, "r", encoding="utf8") as src_file: + raw = src_file.read() + title, turn, latex = latex_from_markdown(raw) + if title not in articles: + articles[title] = {} + articles[title][turn] = latex + + # Write the preamble + content = "\\documentclass[12pt,a4paper,twocolumn,twoside]{article}\n"\ + "\\usepackage[perpage]{footmisc}\n"\ + "\\begin{document}\n"\ + "\n" + + for title in sorted(articles.keys(), key=lambda t: utils.titlesort(t)): + under_title = articles[title] + turns = sorted(under_title.keys()) + latex = under_title[turns[0]] + + # Section header + content += "\\label{{{}}}\n".format(hash(title)) + content += "\\section*{{{}}}\n\n".format(title) + + # Section content + #format_map = { + # "c"+str(c.id) : c.format("\\footnote{{{target}}}") + # for c in article.citations + #} + #article_content = article.content.format(**format_map) + content += latex + + # Addendums + for turn in turns[1:]: + #content += "\\vspace{6pt}\n\\hrule\n\\vspace{6pt}\n\n" + content += "\\begin{center}\n$\\ast$~$\\ast$~$\\ast$\n\\end{center}\n\n" + + latex = under_title[turn] + #format_map = { + # "c"+str(c.id) : c.format("\\footnote{{{target}}}") + # for c in addendum.citations + #} + #article_content = addendum.content.format(**format_map) + content += latex + + content += "\\end{document}" + + content = re.sub(r"\"(?=\w)", "``", content) + content = re.sub(r"(?<=\w)\"", "''", content) + + return content + def build_all(path_prefix, lexicon_name): """ Builds all browsable articles and pages in the Lexicon. From 014ff075c1f2e8bd0c8fe34ba6e72d3c51f393e4 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Mon, 22 Apr 2019 13:46:52 -0700 Subject: [PATCH 23/40] Config changes for the next round of updates --- src/resources/lexicon.cfg | 56 ++++++++++++++++++++++----------------- 1 file changed, 32 insertions(+), 24 deletions(-) diff --git a/src/resources/lexicon.cfg b/src/resources/lexicon.cfg index 7185b72..8cbe4f0 100644 --- a/src/resources/lexicon.cfg +++ b/src/resources/lexicon.cfg @@ -1,11 +1,6 @@ # LEXIPYTHON CONFIG FILE # # This file defines the configuration values for an instance of Lexipython. -# Configuration values are written as: ->>>CONFIG_NAME>>> -value -<<>>LEXICON_TITLE>>> @@ -29,33 +24,46 @@ logo.png

Put session information here, like the index grouping and turn count, where to send completed entries, index assignments, turn schedule, and so on.

<<>>INDEX_LIST>>> -ABC -DEF -GHI -JKL -MNO -PQRS -TUV -WXYZ +char:ABC +char:DEF +char:GHI +char:JKL +char:MNO +char:PQRS +char:TUV +char:WXYZ +etc:&c. <<>>DEFAULT_SORT>>> -?byturn +index <<>>ALLOW_ADDENDA>>> +False +<<>>GRAPHVIZ_FILE>>> <<>>PRINTABLE_FILE>>> -<<>>SEARCHABLE_FILE>>> +<< Date: Mon, 22 Apr 2019 13:47:16 -0700 Subject: [PATCH 24/40] Make module more module-like --- lexipython.py => lexipython/__main__.py | 0 {src => lexipython}/article.py | 0 {src => lexipython}/build.py | 0 {src => lexipython}/resources/contents.html | 0 {src => lexipython}/resources/editor.html | 0 {src => lexipython}/resources/entry-page.html | 0 {src => lexipython}/resources/example-page.txt | 0 {src => lexipython}/resources/formatting.html | 0 {src => lexipython}/resources/lexicon.cfg | 0 {src => lexipython}/resources/lexicon.css | 0 {src => lexipython}/resources/page-skeleton.html | 0 {src => lexipython}/resources/redirect.html | 0 {src => lexipython}/resources/rules.html | 0 {src => lexipython}/utils.py | 0 14 files changed, 0 insertions(+), 0 deletions(-) rename lexipython.py => lexipython/__main__.py (100%) mode change 100755 => 100644 rename {src => lexipython}/article.py (100%) rename {src => lexipython}/build.py (100%) rename {src => lexipython}/resources/contents.html (100%) rename {src => lexipython}/resources/editor.html (100%) rename {src => lexipython}/resources/entry-page.html (100%) rename {src => lexipython}/resources/example-page.txt (100%) rename {src => lexipython}/resources/formatting.html (100%) rename {src => lexipython}/resources/lexicon.cfg (100%) rename {src => lexipython}/resources/lexicon.css (100%) rename {src => lexipython}/resources/page-skeleton.html (100%) rename {src => lexipython}/resources/redirect.html (100%) rename {src => lexipython}/resources/rules.html (100%) rename {src => lexipython}/utils.py (100%) diff --git a/lexipython.py b/lexipython/__main__.py old mode 100755 new mode 100644 similarity index 100% rename from lexipython.py rename to lexipython/__main__.py diff --git a/src/article.py b/lexipython/article.py similarity index 100% rename from src/article.py rename to lexipython/article.py diff --git a/src/build.py b/lexipython/build.py similarity index 100% rename from src/build.py rename to lexipython/build.py diff --git a/src/resources/contents.html b/lexipython/resources/contents.html similarity index 100% rename from src/resources/contents.html rename to lexipython/resources/contents.html diff --git a/src/resources/editor.html b/lexipython/resources/editor.html similarity index 100% rename from src/resources/editor.html rename to lexipython/resources/editor.html diff --git a/src/resources/entry-page.html b/lexipython/resources/entry-page.html similarity index 100% rename from src/resources/entry-page.html rename to lexipython/resources/entry-page.html diff --git a/src/resources/example-page.txt b/lexipython/resources/example-page.txt similarity index 100% rename from src/resources/example-page.txt rename to lexipython/resources/example-page.txt diff --git a/src/resources/formatting.html b/lexipython/resources/formatting.html similarity index 100% rename from src/resources/formatting.html rename to lexipython/resources/formatting.html diff --git a/src/resources/lexicon.cfg b/lexipython/resources/lexicon.cfg similarity index 100% rename from src/resources/lexicon.cfg rename to lexipython/resources/lexicon.cfg diff --git a/src/resources/lexicon.css b/lexipython/resources/lexicon.css similarity index 100% rename from src/resources/lexicon.css rename to 
lexipython/resources/lexicon.css diff --git a/src/resources/page-skeleton.html b/lexipython/resources/page-skeleton.html similarity index 100% rename from src/resources/page-skeleton.html rename to lexipython/resources/page-skeleton.html diff --git a/src/resources/redirect.html b/lexipython/resources/redirect.html similarity index 100% rename from src/resources/redirect.html rename to lexipython/resources/redirect.html diff --git a/src/resources/rules.html b/lexipython/resources/rules.html similarity index 100% rename from src/resources/rules.html rename to lexipython/resources/rules.html diff --git a/src/utils.py b/lexipython/utils.py similarity index 100% rename from src/utils.py rename to lexipython/utils.py From 95d2cddf1794646cfaf7a415b6e3ae92d6019664 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Mon, 22 Apr 2019 14:14:59 -0700 Subject: [PATCH 25/40] Adjust paths for package org --- lexipython/__main__.py | 6 +++--- lexipython/article.py | 2 +- lexipython/build.py | 4 ++-- lexipython/resources/__init__.py | 0 lexipython/utils.py | 6 ++++-- 5 files changed, 10 insertions(+), 8 deletions(-) create mode 100644 lexipython/resources/__init__.py diff --git a/lexipython/__main__.py b/lexipython/__main__.py index 89ebfe6..d866e79 100644 --- a/lexipython/__main__.py +++ b/lexipython/__main__.py @@ -8,9 +8,9 @@ import argparse import os import re import json -from src.article import LexiconArticle -from src import build -from src import utils +from article import LexiconArticle +import build +import utils def is_lexicon(name): """ diff --git a/lexipython/article.py b/lexipython/article.py index 6df114d..3b645b8 100644 --- a/lexipython/article.py +++ b/lexipython/article.py @@ -1,7 +1,7 @@ import os import sys import re -import src.utils as utils +import utils class LexiconCitation: """ diff --git a/lexipython/build.py b/lexipython/build.py index 9d155e2..75903d0 100644 --- a/lexipython/build.py +++ b/lexipython/build.py @@ -5,8 +5,8 @@ import io # For writing pages out as UTF-8 import networkx # For pagerank analytics from collections import defaultdict # For rank inversion in statistics -from src import utils -from src.article import LexiconArticle +import utils +from article import LexiconArticle class LexiconPage: """ diff --git a/lexipython/resources/__init__.py b/lexipython/resources/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/lexipython/utils.py b/lexipython/utils.py index 9d2ea2c..2bc2e7b 100644 --- a/lexipython/utils.py +++ b/lexipython/utils.py @@ -1,6 +1,7 @@ import os import re from urllib import parse +import pkg_resources # Short utility functions for handling titles @@ -37,8 +38,9 @@ def titlesort(s): def load_resource(filename, cache={}): """Loads files from the resources directory with caching.""" if filename not in cache: - with open(os.path.join("src", "resources", filename), "r", encoding="utf-8") as f: - cache[filename] = f.read() + binary = pkg_resources.resource_string("resources", filename) + unistr = binary.decode("utf-8") + cache[filename] = unistr return cache[filename] def load_config(name): From 1f702b5af40bb8015adfa1f387113b9083b0d649 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Mon, 22 Apr 2019 14:56:47 -0700 Subject: [PATCH 26/40] Update config check to use default values dynamically --- lexipython/utils.py | 61 ++++++++++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 26 deletions(-) diff --git a/lexipython/utils.py b/lexipython/utils.py index 2bc2e7b..5bae5ef 100644 --- a/lexipython/utils.py +++ 
b/lexipython/utils.py @@ -1,5 +1,6 @@ import os import re +import io from urllib import parse import pkg_resources @@ -43,35 +44,43 @@ def load_resource(filename, cache={}): cache[filename] = unistr return cache[filename] +def parse_config_file(f): + """Parses a Lexipython config file.""" + config = {} + line = f.readline() + while line: + # Skim lines until a value definition begins + conf_match = re.match(r">>>([^>]+)>>>\s+", line) + if not conf_match: + line = f.readline() + continue + # Accumulate the conf value until the value ends + conf = conf_match.group(1) + conf_value = "" + line = f.readline() + conf_match = re.match(r"<<<{0}<<<\s+".format(conf), line) + while line and not conf_match: + conf_value += line + line = f.readline() + conf_match = re.match(r"<<<{0}<<<\s+".format(conf), line) + if not line: + raise EOFError("Reached EOF while reading config value {}".format(conf)) + config[conf] = conf_value.strip() + return config + def load_config(name): """ Loads values from a Lexicon's config file. """ - config = {} with open(os.path.join("lexicon", name, "lexicon.cfg"), "r", encoding="utf8") as f: - line = f.readline() - while line: - # Skim lines until a value definition begins - conf_match = re.match(r">>>([^>]+)>>>\s+", line) - if not conf_match: - line = f.readline() - continue - # Accumulate the conf value until the value ends - conf = conf_match.group(1) - conf_value = "" - line = f.readline() - conf_match = re.match(r"<<<{0}<<<\s+".format(conf), line) - while line and not conf_match: - conf_value += line - line = f.readline() - conf_match = re.match(r"<<<{0}<<<\s+".format(conf), line) - if not line: - # TODO Not this - raise SystemExit("Reached EOF while reading config value {}".format(conf)) - config[conf] = conf_value.strip() - # Check that all necessary values were configured - for config_value in ['LEXICON_TITLE', 'PROMPT', 'SESSION_PAGE', "INDEX_LIST"]: - if config_value not in config: - # TODO Not this either - raise SystemExit("Error: {} not set in lexipython.cfg".format(config_value)) + config = parse_config_file(f) + # Check that no values are missing that are present in the default config + with io.StringIO(load_resource("lexicon.cfg")) as f: + default_config = parse_config_file(f) + missing_keys = [] + for key in default_config.keys(): + if key not in config: + missing_keys.append(key) + if missing_keys: + raise KeyError("{} missing config values for: {}".format(name, " ".join(missing_keys))) return config From 3ac0b2c73815b5b896342aa992e0ccfae021db5b Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Tue, 23 Apr 2019 16:20:23 -0700 Subject: [PATCH 27/40] Improve index sorting options --- lexipython/build.py | 120 ++++++++++++++++++++++++----- lexipython/resources/contents.html | 64 ++++++++++----- lexipython/resources/lexicon.cfg | 18 +++-- 3 files changed, 155 insertions(+), 47 deletions(-) diff --git a/lexipython/build.py b/lexipython/build.py index 75903d0..a8ea3f5 100644 --- a/lexipython/build.py +++ b/lexipython/build.py @@ -28,11 +28,21 @@ class LexiconPage: total_kwargs = {**self.kwargs, **kwargs} return self.skeleton.format(**total_kwargs) -def build_contents_page(page, articles, index_list): +def article_matches_index(index_type, pattern, article): + if index_type == "char": + return utils.titlesort(article.title)[0].upper() in pattern.upper() + if index_type == "prefix": + return article.title.startswith(pattern) + if index_type == "etc": + return True + raise ValueError("Unknown index type: '{}'".format(index_type)) + +def build_contents_page(config, 
page, articles): """ Builds the full HTML of the contents page. """ content = "
" + # Head the contents page with counts of written and phantom articles phantom_count = len([article for article in articles if article.player is None]) if phantom_count == 0: @@ -40,32 +50,68 @@ def build_contents_page(page, articles, index_list): else: content += "

There are {0} entries, {1} written and {2} phantom.

\n".format( len(articles), len(articles) - phantom_count, phantom_count) + # Prepare article links link_by_title = {article.title : "{0}".format( article.title, article.title_filesafe, " class=\"phantom\"" if article.player is None else "") for article in articles} - # Write the articles in alphabetical order - content += utils.load_resource("contents.html") - content += "
\n
    \n" - indices = index_list.split("\n") - alphabetical_order = sorted( + + # Determine index order + indices = config['INDEX_LIST'].split("\n") + index_by_pri = {} + index_list_order = [] + for index in indices: + match = re.match(r"([^[:]+)(\[([-\d]+)\])?:(.+)", index) + index_type = match.group(1) + pattern = match.group(4) + try: + pri_s = match.group(3) + pri = int(pri_s) if pri_s else 0 + except: + raise TypeError("Could not parse index pri '{}' in '{}'".format(pri_s, index)) + if pri not in index_by_pri: + index_by_pri[pri] = [] + index_by_pri[pri].append((index_type, pattern)) + index_list_order.append(pattern) + + # Assign articles to indices + articles_by_index = {pattern: [] for pattern in index_list_order} + titlesort_order = sorted( articles, key=lambda a: utils.titlesort(a.title)) - check_off = list(alphabetical_order) - for index_str in indices: - content += "

    {0}

    \n".format(index_str) - for article in alphabetical_order: - if (utils.titlesort(article.title)[0].upper() in index_str): - check_off.remove(article) - content += "
  • {}
  • \n".format(link_by_title[article.title]) - if len(check_off) > 0: - content += "

    &c.

    \n" - for article in check_off: + for article in titlesort_order: + # Find the first index that matches + matched = False + for pri, indices in sorted(index_by_pri.items(), reverse=True): + for index_type, pattern in indices: + # Try to match the index + if article_matches_index(index_type, pattern, article): + articles_by_index[pattern].append(article) + matched = True + # Break out once a match is found + if matched: + break + if matched: + break + if not matched: + raise KeyError("No index matched article '{}'".format(article.title)) + + # Write index order div + content += utils.load_resource("contents.html") + content += "
    \n
      \n".format( + "block" if config["DEFAULT_SORT"] == "index" else "none") + for pattern in index_list_order: + # Write the index header + content += "

      {0}

      \n".format(pattern) + # Write all matches articles + for article in articles_by_index[pattern]: content += "
    • {}
    • \n".format(link_by_title[article.title]) content += "
    \n
    \n" - # Write the articles in turn order - content += "
    \n
      \n" + + # Write turn order div + content += "
      \n
        \n".format( + "block" if config["DEFAULT_SORT"] == "turn" else "none") turn_numbers = [article.turn for article in articles if article.player is not None] first_turn, last_turn = min(turn_numbers), max(turn_numbers) turn_order = sorted( @@ -83,6 +129,31 @@ def build_contents_page(page, articles, index_list): for article in check_off: content += "
      • {}
      • \n".format(link_by_title[article.title]) content += "
      \n
      \n" + + # Write by-player div + content += "
      \n
        \n".format( + "block" if config["DEFAULT_SORT"] == "player" else "none") + articles_by_player = {} + extant_phantoms = False + for article in turn_order: + if article.player is not None: + if article.player not in articles_by_player: + articles_by_player[article.player] = [] + articles_by_player[article.player].append(article) + else: + extant_phantoms = True + for player, player_articles in sorted(articles_by_player.items()): + content += "

        {0}

        \n".format(player) + for article in player_articles: + content += "
      • {}
      • \n".format(link_by_title[article.title]) + if extant_phantoms: + content += "

        Unwritten

        \n" + for article in titlesort_order: + if article.player is None: + content += "
      • {}
      • \n".format(link_by_title[article.title]) + content += "
      \n
      \n" + + content += "
    \n" # Fill in the page skeleton return page.format(title="Index", content=content) @@ -477,6 +548,15 @@ def latex_from_directory(directory): return content +def parse_sort_type(sort): + if sort in "?byindex": + return "?byindex" + if sort in "?byturn": + return "?byturn" + if sort in "?byplayer": + return "?byplayer" + return "" + def build_all(path_prefix, lexicon_name): """ Builds all browsable articles and pages in the Lexicon. @@ -490,7 +570,7 @@ def build_all(path_prefix, lexicon_name): lexicon=config["LEXICON_TITLE"], logo=config["LOGO_FILENAME"], prompt=config["PROMPT"], - sort=config["DEFAULT_SORT"]) + sort=parse_sort_type(config["DEFAULT_SORT"])) # Parse the written articles articles = LexiconArticle.parse_from_directory(os.path.join(lex_path, "src")) # Once they've been populated, the articles list has the titles of all articles @@ -528,7 +608,7 @@ def build_all(path_prefix, lexicon_name): # Write default pages print("Writing default pages...") with open(pathto("contents", "index.html"), "w", encoding="utf-8") as f: - f.write(build_contents_page(page, articles, config["INDEX_LIST"])) + f.write(build_contents_page(config, page, articles)) print(" Wrote Contents") with open(pathto("rules", "index.html"), "w", encoding="utf-8") as f: f.write(build_rules_page(page)) diff --git a/lexipython/resources/contents.html b/lexipython/resources/contents.html index a7c2543..eb3b850 100644 --- a/lexipython/resources/contents.html +++ b/lexipython/resources/contents.html @@ -1,28 +1,54 @@ diff --git a/lexipython/resources/lexicon.cfg b/lexipython/resources/lexicon.cfg index 8cbe4f0..66d0d35 100644 --- a/lexipython/resources/lexicon.cfg +++ b/lexipython/resources/lexicon.cfg @@ -24,16 +24,18 @@ logo.png

    Put session information here, like the index grouping and turn count, where to send completed entries, index assignments, turn schedule, and so on.

    <<>>INDEX_LIST>>> char:ABC char:DEF @@ -47,7 +49,7 @@ etc:&c. <<>>DEFAULT_SORT>>> index << Date: Tue, 23 Apr 2019 23:50:45 -0700 Subject: [PATCH 28/40] Move prev/next to the end of articles with addenda --- lexipython/article.py | 45 +++++++++++++++++++++++++++---------------- 1 file changed, 28 insertions(+), 17 deletions(-) diff --git a/lexipython/article.py b/lexipython/article.py index 3b645b8..2a6da84 100644 --- a/lexipython/article.py +++ b/lexipython/article.py @@ -221,8 +221,7 @@ class LexiconArticle: content += "

    {}

    {}
    \n".format( self.title, main_body) # Build the main citation content block - main_citations = self.build_default_citeblock( - self.prev_article, self.next_article) + main_citations = self.build_default_citeblock() if main_citations: content += "
    {}
    \n".format( main_citations) @@ -230,10 +229,16 @@ class LexiconArticle: for addendum in self.addendums: add_body = addendum.build_default_article_body() content += "
    {}
    \n".format(add_body) - add_citations = addendum.build_default_citeblock(None, None) + add_citations = addendum.build_default_citeblock() if add_citations: content += "
    {}
    \n".format( add_citations) + # Build the prev/next block + prev_next = self.build_prev_next_block( + self.prev_article, self.next_article) + if prev_next: + content += "
    {}
    \n".format( + prev_next) return content def build_default_article_body(self): @@ -247,23 +252,12 @@ class LexiconArticle: } return self.content.format(**format_map) - def build_default_citeblock(self, prev_article, next_article): + def build_default_citeblock(self): """ - Builds the contents of a citation contentblock. For each defined target, - links the target page as Previous or Next. Prev/next and cites/citedby - elements are not included if they have no content. + Builds the contents of a citation contentblock. Skips sections with no + content. """ content = "" - # Prev/next links: - if next_article is not None or prev_article is not None: - prev_link = ("← Previous".format( - prev_article) - if prev_article is not None else "") - next_link = ("Next →".format( - next_article) - if next_article is not None else "") - content += "
# Player:
# Turn:
\n\n\n
{}{}
\n".format( - prev_link, next_link) # Citations cites_titles = set() cites_links = [] @@ -289,3 +283,20 @@ class LexiconArticle: content += "

Cited by: {}

\n".format(citedby_str) return content + + def build_prev_next_block(self, prev_article, next_article): + """ + For each defined target, links the target page as Previous or Next. + """ + content = "" + # Prev/next links: + if next_article is not None or prev_article is not None: + prev_link = ("← Previous".format( + prev_article) + if prev_article is not None else "") + next_link = ("Next →".format( + next_article) + if next_article is not None else "") + content += "\n\n\n
{}{}
\n".format( + prev_link, next_link) + return content From 8773f6b58f127057a1d12bb03a0ae386e5cebdbe Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Tue, 23 Apr 2019 23:56:59 -0700 Subject: [PATCH 29/40] Add title-turn uniqueness check --- lexipython/article.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lexipython/article.py b/lexipython/article.py index 2a6da84..4490809 100644 --- a/lexipython/article.py +++ b/lexipython/article.py @@ -172,6 +172,13 @@ class LexiconArticle: Fills out fields on articles that require other articles for context. Creates phantom articles. """ + # Preliminary assertion that title/turn is unique + keys = set() + for article in lexicon_articles: + if (article.title, article.turn) in keys: + raise ValueError("Found two articles with title '{}' and turn '{}'".format( + article.title, article.turn)) + keys.add((article.title, article.turn)) # Sort out which articles are addendums and which titles are phantoms written_titles = set() cited_titles = set() From b3875fc7da92cb678f8f37d13336ac584d85510f Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sat, 18 May 2019 00:16:46 -0700 Subject: [PATCH 30/40] Statistics refactor and config hookup --- lexipython/build.py | 211 +++----------------- lexipython/resources/lexicon.cfg | 15 ++ lexipython/statistics.py | 317 +++++++++++++++++++++++++++++++ 3 files changed, 354 insertions(+), 189 deletions(-) create mode 100644 lexipython/statistics.py diff --git a/lexipython/build.py b/lexipython/build.py index a8ea3f5..45ec841 100644 --- a/lexipython/build.py +++ b/lexipython/build.py @@ -1,12 +1,12 @@ -import sys # For argv and stderr +# Standard library imports import os # For reading directories import re # For parsing lex content -import io # For writing pages out as UTF-8 -import networkx # For pagerank analytics -from collections import defaultdict # For rank inversion in statistics +# Application imports import utils from article import LexiconArticle +from statistics import LexiconStatistics + class LexiconPage: """ @@ -181,191 +181,24 @@ def build_session_page(page, session_content): content = "
{}
".format(session_content) return page.format(title="Session", content=content) -def reverse_statistics_dict(stats, reverse=True): - """ - Transforms a dictionary mapping titles to a value into a list of values - and lists of titles. The list is sorted by the value, and the titles are - sorted alphabetically. - """ - rev = {} - for key, value in stats.items(): - if value not in rev: - rev[value] = [] - rev[value].append(key) - for key, value in rev.items(): - rev[key] = sorted(value, key=lambda t: utils.titlesort(t)) - return sorted(rev.items(), key=lambda x:x[0], reverse=reverse) +def build_statistics_page(config, page, articles): + # Read the config file for which stats to publish. + lines = config['STATISTICS'].split("\n") + stats = [] + for line in lines: + stat, toggle = line.split() + if toggle == "on": + stats.append("stat_" + stat) -def itemize(stats_list): - return map(lambda x: "{0} – {1}".format(x[0], "; ".join(x[1])), stats_list) - -def build_statistics_page(page, articles): - """ - Builds the full HTML of the statistics page. - - The existence of addendum articles complicates how some statistics are - computed. An addendum is an article, with its own author, body, and - citations, but in a Lexicon it exists appended to another article. To handle - this, we distinguish an _article_ from a _page_. An article is a unit parsed - from a single source file. A page is a main article and all addendums under - the same title. - """ - min_turn = 0 - max_turn = 0 - article_by_title = {} - page_by_title = {} - players = set() - for main_article in articles: - key = main_article.title - page_by_title[key] = [main_article] - page_by_title[key].extend(main_article.addendums) - for article in [main_article] + main_article.addendums: - # Disambiguate articles by appending turn number to the title - key = "{0.title} (T{0.turn})".format(article) - article_by_title[key] = article - if article.player is not None: - min_turn = min(min_turn, article.turn) - max_turn = max(max_turn, article.turn) - players.add(article.player) - content = "" - stat_block = "
{0}
{1}
\n" - - # Top pages by pagerank - # Compute pagerank for each page, including all articles - G = networkx.Graph() - for page_title, articles in page_by_title.items(): - for article in articles: - for citation in article.citations: - G.add_edge(page_title, citation.target) - pagerank_by_title = networkx.pagerank(G) - for page_title, articles in page_by_title.items(): - if page_title not in pagerank_by_title: - pagerank_by_title[page_title] = 0 - # Get the top ten articles by pagerank - top_pageranks = reverse_statistics_dict(pagerank_by_title)[:10] - # Replace the pageranks with ordinals - top_ranked = enumerate(map(lambda x: x[1], top_pageranks), start=1) - # Format the ranks into strings - top_ranked_items = itemize(top_ranked) - # Write the statistics to the page - content += stat_block.format( - "Top 10 articles by page rank:", - "
".join(top_ranked_items)) - - # Pages cited/cited by - pages_cited = {page_title: set() for page_title in page_by_title.keys()} - pages_cited_by = {page_title: set() for page_title in page_by_title.keys()} - for page_title, articles in page_by_title.items(): - for article in articles: - for citation in article.citations: - pages_cited[page_title].add(citation.target) - pages_cited_by[citation.target].add(page_title) - for page_title, cite_titles in pages_cited.items(): - pages_cited[page_title] = len(cite_titles) - for page_title, cite_titles in pages_cited_by.items(): - pages_cited_by[page_title] = len(cite_titles) - - top_citations = reverse_statistics_dict(pages_cited)[:3] - top_citations_items = itemize(top_citations) - content += stat_block.format( - "Cited the most pages:", - "
".join(top_citations_items)) - top_cited = reverse_statistics_dict(pages_cited_by)[:3] - top_cited_items = itemize(top_cited) - content += stat_block.format( - "Cited by the most pages:", - "
".join(top_cited_items)) - - # Top article length - article_length_by_title = {} - cumulative_article_length_by_turn = { - turn_num: 0 - for turn_num in range(min_turn, max_turn + 1) - } - for article_title, article in article_by_title.items(): - format_map = { - "c"+str(c.id): c.text - for c in article.citations - } - plain_content = article.content.format(**format_map) - word_count = len(plain_content.split()) - article_length_by_title[article_title] = word_count - for turn_num in range(min_turn, max_turn + 1): - if article.turn <= turn_num: - cumulative_article_length_by_turn[turn_num] += word_count - top_length = reverse_statistics_dict(article_length_by_title)[:3] - top_length_items = itemize(top_length) - content += stat_block.format( - "Longest articles:", - "
".join(top_length_items)) - - # Total word count - len_list = [(str(k), [str(v)]) for k,v in cumulative_article_length_by_turn.items()] - content += stat_block.format( - "Aggregate word count by turn:", - "
".join(itemize(len_list))) - - # Player pageranks - pagerank_by_player = {player: 0 for player in players} - for page_title, articles in page_by_title.items(): - page_author = articles[0].player - if page_author is not None: - pagerank_by_player[page_author] += pagerank_by_title[page_title] - for player, pagerank in pagerank_by_player.items(): - pagerank_by_player[player] = round(pagerank, 3) - player_rank = reverse_statistics_dict(pagerank_by_player) - player_rank_items = itemize(player_rank) - content += stat_block.format( - "Player aggregate page rank:", - "
".join(player_rank_items)) - - # Player citations made - pages_cited_by_player = {player: 0 for player in players} - for article_title, article in article_by_title.items(): - if article.player is not None: - pages_cited_by_player[article.player] += len(article.citations) - player_cites_made_ranks = reverse_statistics_dict(pages_cited_by_player) - player_cites_made_items = itemize(player_cites_made_ranks) - content += "
\n" - content += "Citations made by player:
\n" - content += "
\n".join(player_cites_made_items) - content += "
\n" - - # Player cited count - pages_cited_by_by_player = {player: 0 for player in players} - for page_title, articles in page_by_title.items(): - page_author = articles[0].player - if page_author is not None: - pages_cited_by_by_player[page_author] += len(articles[0].citedby) - cited_times_ranked = reverse_statistics_dict(pages_cited_by_by_player) - cited_times_items = itemize(cited_times_ranked) - content += "
\n" - content += "Citations made to article by player:
\n" - content += "
\n".join(cited_times_items) - content += "
\n" - - # Lowest pagerank of written articles - exclude = [a.title for a in articles if a.player is None] - rank_by_written_only = {k:v for k,v in pagerank_by_title.items() if k not in exclude} - pageranks = reverse_statistics_dict(rank_by_written_only) - bot_ranked = list(enumerate(map(lambda x: x[1], pageranks), start=1))[-10:] - # Format the ranks into strings - bot_ranked_items = itemize(bot_ranked) - content += "
\n" - content += "Bottom 10 articles by pagerank:
\n" - content += "
\n".join(bot_ranked_items) - content += "
\n" - - # Undercited articles - undercited = { - page_title: len(articles[0].citedby) - for page_title, articles in page_by_title.items() - if len(articles[0].citedby) < 2} - undercited_items = itemize(reverse_statistics_dict(undercited)) - content += "
\n" - content += "Undercited articles:
\n" - content += "
\n".join(undercited_items) - content += "
\n" + # Create all the stats blocks. + lexicon_stats = LexiconStatistics(articles) + stats_blocks = [] + for stat in stats: + if hasattr(lexicon_stats, stat): + stats_blocks.append(getattr(lexicon_stats, stat)()) + else: + print("ERROR: Bad stat {}".format(stat)) + content = "\n".join(stats_blocks) # Fill in the entry skeleton return page.format(title="Statistics", content=content) @@ -620,7 +453,7 @@ def build_all(path_prefix, lexicon_name): f.write(build_session_page(page, config["SESSION_PAGE"])) print(" Wrote Session") with open(pathto("statistics", "index.html"), "w", encoding="utf-8") as f: - f.write(build_statistics_page(page, articles)) + f.write(build_statistics_page(config, page, articles)) print(" Wrote Statistics") # Write auxiliary pages diff --git a/lexipython/resources/lexicon.cfg b/lexipython/resources/lexicon.cfg index 66d0d35..9d03e53 100644 --- a/lexipython/resources/lexicon.cfg +++ b/lexipython/resources/lexicon.cfg @@ -48,6 +48,21 @@ char:WXYZ etc:&c. <<>>STATISTICS>>> +top_pagerank on +most_citations_made on +most_citations_to on +longest_article on +cumulative_wordcount off +player_pagerank on +player_citations_made on +player_citations_to on +bottom_pagerank off +undercited off +<<>>DEFAULT_SORT>>> diff --git a/lexipython/statistics.py b/lexipython/statistics.py new file mode 100644 index 0000000..85b4ec9 --- /dev/null +++ b/lexipython/statistics.py @@ -0,0 +1,317 @@ +# Third party imports +try: + import networkx # For pagerank analytics + NETWORKX_ENABLED = True +except: + NETWORKX_ENABLED = False + +# Application imports +from utils import titlesort + + +def reverse_statistics_dict(stats, reverse=True): + """ + Transforms a dictionary mapping titles to a value into a list of values + and lists of titles. The list is sorted by the value, and the titles are + sorted alphabetically. + """ + rev = {} + for key, value in stats.items(): + if value not in rev: + rev[value] = [] + rev[value].append(key) + for key, value in rev.items(): + rev[key] = sorted(value, key=lambda t: titlesort(t)) + return sorted(rev.items(), key=lambda x:x[0], reverse=reverse) + + +def itemize(stats_list): + """ + Formats a list consisting of tuples of ranks and lists of ranked items. + """ + return map(lambda x: "{0} – {1}".format(x[0], "; ".join(x[1])), stats_list) + + +class LexiconStatistics(): + """ + A wrapper for a persistent statistics context with some precomputed + values around for convenience. + + The existence of addendum articles complicates how some statistics are + computed. An addendum is an article, with its own author, body, and + citations, but in a Lexicon it exists appended to another article. To handle + this, we distinguish an _article_ from a _page_. An article is a unit parsed + from a single source file. A page is a main article and all addendums under + the same title. + """ + + def __init__(self, articles): + self.articles = articles + self.min_turn = 0 + self.max_turn = 0 + self.players = set() + self.title_to_article = {} + self.title_to_page = {} + self.stat_block = "
{0}
{1}
\n" + # Pagerank may not be computable if networkx isn't installed. + self.title_to_pagerank = None + + for main_article in articles: + page_title = main_article.title + self.title_to_page[page_title] = [main_article] + self.title_to_page[page_title].extend(main_article.addendums) + for article in self.title_to_page[page_title]: + # Disambiguate articles by appending turn number to the title + key = "{0.title} (T{0.turn})".format(article) + self.title_to_article[key] = article + if article.player is not None: + # Phantoms have turn MAXINT by convention + self.min_turn = min(self.min_turn, article.turn) + self.max_turn = max(self.max_turn, article.turn) + self.players.add(article.player) + + def _try_populate_pagerank(self): + """Computes pagerank if networkx is imported.""" + if NETWORKX_ENABLED and self.title_to_pagerank is None: + # Create a citation graph linking page titles. + G = networkx.Graph() + for page_title, articles in self.title_to_page.items(): + for article in articles: + for citation in article.citations: + G.add_edge(page_title, citation.target) + + # Compute pagerank on the page citation graph. + self.title_to_pagerank = networkx.pagerank(G) + # Any article with no links in the citation graph have no pagerank. + # Assign these pagerank 0 to avoid key errors or missing pages in + # the stats. + for page_title, articles in self.title_to_page.items(): + if page_title not in self.title_to_pagerank: + self.title_to_pagerank[page_title] = 0 + + def stat_top_pagerank(self): + """Computes the top 10 pages by pagerank.""" + self._try_populate_pagerank() + + if not self.title_to_pagerank: + # If networkx was not successfully imported, skip the pagerank. + top_ranked_items = "networkx must be installed to compute pageranks." + + else: + # Get the top ten articles by pagerank. + top_pageranks = reverse_statistics_dict(self.title_to_pagerank)[:10] + # Replace the pageranks with ordinals. + top_ranked = enumerate(map(lambda x: x[1], top_pageranks), start=1) + # Format the ranks into strings. + top_ranked_items = itemize(top_ranked) + + # Format the statistics block. + return self.stat_block.format( + "Top 10 articles by page rank:", + "
".join(top_ranked_items)) + + def stat_most_citations_made(self): + """Computes the top 3 ranks for citations made FROM a page.""" + # Determine which pages are cited from all articles on a page. + pages_cited = { + page_title: set() + for page_title in self.title_to_page.keys()} + for page_title, articles in self.title_to_page.items(): + for article in articles: + for citation in article.citations: + pages_cited[page_title].add(citation.target) + # Compute the number of unique articles cited by a page. + for page_title, cite_titles in pages_cited.items(): + pages_cited[page_title] = len(cite_titles) + + # Reverse and itemize the citation counts. + top_citations = reverse_statistics_dict(pages_cited)[:3] + top_citations_items = itemize(top_citations) + + # Format the statistics block. + return self.stat_block.format( + "Cited the most pages:", + "
".join(top_citations_items)) + + def stat_most_citations_to(self): + """Computes the top 3 ranks for citations made TO a page.""" + # Determine which pages cite a page. + pages_cited_by = { + page_title: set() + for page_title in self.title_to_page.keys()} + for page_title, articles in self.title_to_page.items(): + for article in articles: + for citation in article.citations: + pages_cited_by[citation.target].add(page_title) + # Compute the number of unique articles that cite a page. + for page_title, cite_titles in pages_cited_by.items(): + pages_cited_by[page_title] = len(cite_titles) + + # Reverse and itemize the citation counts. + top_cited = reverse_statistics_dict(pages_cited_by)[:3] + top_cited_items = itemize(top_cited) + + # Format the statistics block. + return self.stat_block.format( + "Cited by the most pages:", + "
".join(top_cited_items)) + + def stat_longest_article(self): + """Computes the top 3 longest articles.""" + # Compute the length of each article (not page). + title_to_article_length = {} + for article_title, article in self.title_to_article.items(): + # Write all citation aliases into the article text to accurately + # compute word count as written. + format_map = { + "c"+str(c.id): c.text + for c in article.citations + } + plain_content = article.content.format(**format_map) + word_count = len(plain_content.split()) + title_to_article_length[article_title] = word_count + + # Reverse and itemize the article lengths. + top_length = reverse_statistics_dict(title_to_article_length)[:3] + top_length_items = itemize(top_length) + + # Format the statistics block. + return self.stat_block.format( + "Longest articles:", + "
".join(top_length_items)) + + def stat_cumulative_wordcount(self): + """Computes the cumulative word count of the lexicon.""" + # Initialize all extant turns to 0. + turn_to_cumulative_wordcount = { + turn_num: 0 + for turn_num in range(self.min_turn, self.max_turn + 1) + } + for article_title, article in self.title_to_article.items(): + # Compute each article's word count. + format_map = { + "c"+str(c.id): c.text + for c in article.citations + } + plain_content = article.content.format(**format_map) + word_count = len(plain_content.split()) + # Add the word count to each turn the article exists in. + for turn_num in range(self.min_turn, self.max_turn + 1): + if article.turn <= turn_num: + turn_to_cumulative_wordcount[turn_num] += word_count + + # Format the statistics block. + len_list = [(str(k), [str(v)]) for k,v in turn_to_cumulative_wordcount.items()] + return self.stat_block.format( + "Aggregate word count by turn:", + "
".join(itemize(len_list))) + + def stat_player_pagerank(self): + """Computes each player's share of the lexicon's pagerank scores.""" + self._try_populate_pagerank() + + if not self.title_to_pagerank: + # If networkx was not successfully imported, skip the pagerank. + player_rank_items = "networkx must be installed to compute pageranks." + + else: + player_to_pagerank = { + player: 0 + for player in self.players} + # Accumulate page pagerank to the main article's author. + for page_title, articles in self.title_to_page.items(): + page_author = articles[0].player + if page_author is not None: + player_to_pagerank[page_author] += self.title_to_pagerank[page_title] + # Round pageranks off to 3 decimal places. + for player, pagerank in player_to_pagerank.items(): + player_to_pagerank[player] = round(pagerank, 3) + + # Reverse and itemize the aggregated pageranks. + player_rank = reverse_statistics_dict(player_to_pagerank) + player_rank_items = itemize(player_rank) + + # Format the statistics block. + return self.stat_block.format( + "Player aggregate page rank:", + "
".join(player_rank_items)) + + def stat_player_citations_made(self): + """Computes the total number of citations made BY each player.""" + pages_cited_by_player = { + player: 0 + for player in self.players} + # Add the number of citations from each authored article (not page). + for article_title, article in self.title_to_article.items(): + if article.player is not None: + pages_cited_by_player[article.player] += len(article.citations) + + # Reverse and itemize the counts. + player_cites_made_ranks = reverse_statistics_dict(pages_cited_by_player) + player_cites_made_items = itemize(player_cites_made_ranks) + + # Format the statistics block. + return self.stat_block.format( + "Citations made by player:", + "
".join(player_cites_made_items)) + + def stat_player_citations_to(self): + """Computes the total number of citations made TO each player's + authored pages.""" + pages_cited_by_by_player = { + player: 0 + for player in self.players} + # Add the number of citations made to each page (not article). + for page_title, articles in self.title_to_page.items(): + page_author = articles[0].player + if page_author is not None: + pages_cited_by_by_player[page_author] += len(articles[0].citedby) + + # Reverse and itemize the results. + cited_times_ranked = reverse_statistics_dict(pages_cited_by_by_player) + cited_times_items = itemize(cited_times_ranked) + + # Format the statistics block. + return self.stat_block.format( + "Citations made to article by player:", + "
".join(cited_times_items)) + + def stat_bottom_pagerank(self): + """Computes the bottom 10 pages by pagerank.""" + self._try_populate_pagerank() + + if not self.title_to_pagerank: + # If networkx was not successfully imported, skip the pagerank. + bot_ranked_items = "networkx must be installed to compute pageranks." + + else: + # Phantoms have no pagerank, because they don't cite anything. + exclude = [ + a.title + for a in self.articles + if a.player is None] + rank_by_written_only = { + k:v + for k,v in self.title_to_pagerank.items() + if k not in exclude} + + # Reverse, enumerate, and itemize the bottom 10 by pagerank. + pageranks = reverse_statistics_dict(rank_by_written_only) + bot_ranked = list(enumerate(map(lambda x: x[1], pageranks), start=1))[-10:] + bot_ranked_items = itemize(bot_ranked) + + # Format the statistics block. + return self.stat_block.format( + "Bottom 10 articles by page rank:", + "
".join(bot_ranked_items)) + + def stat_undercited(self): + """Computes which articles have 0 or 1 citations made to them.""" + undercited = { + page_title: len(articles[0].citedby) + for page_title, articles in self.title_to_page.items() + if len(articles[0].citedby) < 2} + undercited_items = itemize(reverse_statistics_dict(undercited)) + return self.stat_block.format( + "Undercited articles:", + "
".join(undercited_items)) From 172d1b2123b4b56b0087f21f048cb4f3038435ee Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sat, 18 May 2019 00:20:24 -0700 Subject: [PATCH 31/40] Formatting typo in rules page --- lexipython/resources/rules.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lexipython/resources/rules.html b/lexipython/resources/rules.html index 2f8cf5a..9adefd3 100644 --- a/lexipython/resources/rules.html +++ b/lexipython/resources/rules.html @@ -55,7 +55,7 @@
  • As the game goes on, it may come to pass that a player must write an article in an index, but that index is full, and that player has already cited all the phantoms in it. When this happens, the player instead writes - their article as **Ersatz Scrivener**, radical skeptic. Ersatz does not + their article as Ersatz Scrivener, radical skeptic. Ersatz does not believe in the existence of whatever he is writing about, no matter how obvious it seems to others or how central it is in the developing history of the world. For Ersatz, all references, testimony, etc. with regard to From 56766d3ad3eacae78e8e67f96802f678670e5cee Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sat, 18 May 2019 00:38:22 -0700 Subject: [PATCH 32/40] Fix ~ not being escaped --- lexipython/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lexipython/utils.py b/lexipython/utils.py index 5bae5ef..859e9a0 100644 --- a/lexipython/utils.py +++ b/lexipython/utils.py @@ -19,6 +19,7 @@ def titleescape(s): """ s = s.strip() s = re.sub(r"\s+", '_', s) # Replace whitespace with _ + s = re.sub(r"~", '_', s) # parse.quote doesn't catch ~ s = parse.quote(s) # Encode all other characters s = re.sub(r"%", "", s) # Strip encoding %s s = s[:64] # Limit to 64 characters From 309dc681273b42e1f4152f220479889218f59af9 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sat, 18 May 2019 00:45:24 -0700 Subject: [PATCH 33/40] Use different escaping replacement --- lexipython/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lexipython/utils.py b/lexipython/utils.py index 859e9a0..f890721 100644 --- a/lexipython/utils.py +++ b/lexipython/utils.py @@ -19,7 +19,7 @@ def titleescape(s): """ s = s.strip() s = re.sub(r"\s+", '_', s) # Replace whitespace with _ - s = re.sub(r"~", '_', s) # parse.quote doesn't catch ~ + s = re.sub(r"~", '-', s) # parse.quote doesn't catch ~ s = parse.quote(s) # Encode all other characters s = re.sub(r"%", "", s) # Strip encoding %s s = s[:64] # Limit to 64 characters From 2a1c376c441284314524fe2bdb3aaffdb8942966 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sat, 18 May 2019 14:19:26 -0700 Subject: [PATCH 34/40] Override newline for file writes --- lexipython/__main__.py | 6 +++--- lexipython/build.py | 18 +++++++++--------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/lexipython/__main__.py b/lexipython/__main__.py index d866e79..b30991b 100644 --- a/lexipython/__main__.py +++ b/lexipython/__main__.py @@ -136,13 +136,13 @@ def command_init(name): # Edit the name field config = re.sub("Lexicon Title", "Lexicon {}".format(name), config) # Create the Lexicon's config file - with open(os.path.join(lex_path, "lexicon.cfg"), "w") as config_file: + with open(os.path.join(lex_path, "lexicon.cfg"), "w", newline='') as config_file: config_file.write(config) # Copy the CSS file - with open(os.path.join(lex_path, "lexicon.css"), "w") as css_file: + with open(os.path.join(lex_path, "lexicon.css"), "w", newline='') as css_file: css_file.write(utils.load_resource("lexicon.css")) # Create an example page - with open(os.path.join(lex_path, "src", "example-page.txt"), "w") as destfile: + with open(os.path.join(lex_path, "src", "example-page.txt"), "w", newline='') as destfile: destfile.write(utils.load_resource("example-page.txt")) # Create an empty status file open(os.path.join(lex_path, "status"), "w").close() diff --git a/lexipython/build.py b/lexipython/build.py index 45ec841..9fc2270 100644 --- a/lexipython/build.py +++ b/lexipython/build.py @@ -417,7 +417,7 @@ def 
build_all(path_prefix, lexicon_name): # Write the redirect page print("Writing redirect page...") - with open(pathto("index.html"), "w", encoding="utf8") as f: + with open(pathto("index.html"), "w", encoding="utf8", newline='') as f: f.write(utils.load_resource("redirect.html").format( lexicon=config["LEXICON_TITLE"], sort=config["DEFAULT_SORT"])) @@ -430,7 +430,7 @@ def build_all(path_prefix, lexicon_name): l = len(articles) for idx in range(l): article = articles[idx] - with open(pathto("article", article.title_filesafe + ".html"), "w", encoding="utf-8") as f: + with open(pathto("article", article.title_filesafe + ".html"), "w", encoding="utf-8", newline='') as f: content = article.build_default_content() article_html = page.format( title = article.title, @@ -440,29 +440,29 @@ def build_all(path_prefix, lexicon_name): # Write default pages print("Writing default pages...") - with open(pathto("contents", "index.html"), "w", encoding="utf-8") as f: + with open(pathto("contents", "index.html"), "w", encoding="utf-8", newline='') as f: f.write(build_contents_page(config, page, articles)) print(" Wrote Contents") - with open(pathto("rules", "index.html"), "w", encoding="utf-8") as f: + with open(pathto("rules", "index.html"), "w", encoding="utf-8", newline='') as f: f.write(build_rules_page(page)) print(" Wrote Rules") - with open(pathto("formatting", "index.html"), "w", encoding="utf-8") as f: + with open(pathto("formatting", "index.html"), "w", encoding="utf-8", newline='') as f: f.write(build_formatting_page(page)) print(" Wrote Formatting") - with open(pathto("session", "index.html"), "w", encoding="utf-8") as f: + with open(pathto("session", "index.html"), "w", encoding="utf-8", newline='') as f: f.write(build_session_page(page, config["SESSION_PAGE"])) print(" Wrote Session") - with open(pathto("statistics", "index.html"), "w", encoding="utf-8") as f: + with open(pathto("statistics", "index.html"), "w", encoding="utf-8", newline='') as f: f.write(build_statistics_page(config, page, articles)) print(" Wrote Statistics") # Write auxiliary pages if "PRINTABLE_FILE" in config and config["PRINTABLE_FILE"]: - with open(pathto(config["PRINTABLE_FILE"]), "w", encoding="utf-8") as f: + with open(pathto(config["PRINTABLE_FILE"]), "w", encoding="utf-8", newline='') as f: f.write(build_compiled_page(articles, config)) print(" Wrote compiled page to " + config["PRINTABLE_FILE"]) - with open(pathto("editor.html"), "w", encoding="utf-8") as f: + with open(pathto("editor.html"), "w", encoding="utf-8", newline='') as f: editor = utils.load_resource("editor.html") writtenArticles = "" phantomArticles = "" From 6d54390f519001990de193dc1c397fe80f9d0ae0 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sat, 18 May 2019 14:42:13 -0700 Subject: [PATCH 35/40] De-compact css for readability --- lexipython/resources/lexicon.css | 146 ++++++++++++++++++++++++------- 1 file changed, 112 insertions(+), 34 deletions(-) diff --git a/lexipython/resources/lexicon.css b/lexipython/resources/lexicon.css index bb70d3b..988eeb4 100644 --- a/lexipython/resources/lexicon.css +++ b/lexipython/resources/lexicon.css @@ -1,36 +1,114 @@ -body { background-color: #eeeeee; line-height: 1.4; font-size: 16px; } -div#wrapper { max-width: 800px; position: absolute; left: 0; right: 0; - margin: 0 auto; } -div#header { padding: 5px; margin: 5px; background-color: #ffffff; - box-shadow: 2px 2px 10px #888888; border-radius: 5px; } -div#header p, div#header h2 { margin: 5px; } -div#sidebar { width: 200px; float:left; margin:5px; padding: 
8px; - text-align: center; background-color: #ffffff; - box-shadow: 2px 2px 10px #888888; border-radius: 5px; } -img#logo { max-width: 200px; } -table { table-layout: fixed; width: 100%; } -div#sidebar table { border-collapse: collapse; } -div.citeblock table td:first-child + td a { justify-content: flex-end; } -table a { display: flex; padding: 3px; background-color: #dddddd; - border-radius: 5px; text-decoration: none; } -div#sidebar table a { justify-content: center; } -table a:hover { background-color: #cccccc; } -div#sidebar table td { padding: 0px; margin: 3px 0; - border-bottom: 8px solid transparent; } -div#content { position: absolute; right: 0px; left: 226px; max-width: 564px; - margin: 5px; } -div.contentblock { background-color: #ffffff; box-shadow: 2px 2px 10px #888888; - margin-bottom: 5px; padding: 10px; width: auto; border-radius: 5px; } -div.contentblock h3 { margin: 0.3em 0; } -a.phantom { color: #cc2200; } -div.citeblock a.phantom { font-style: italic; } -span.signature { text-align: right; } +body { + background-color: #eeeeee; + line-height: 1.4; + font-size: 16px; +} +div#wrapper { + max-width: 800px; + position: absolute; + left: 0; + right: 0; + margin: 0 auto; +} +div#header { + padding: 5px; + margin: 5px; + background-color: #ffffff; + box-shadow: 2px 2px 10px #888888; + border-radius: 5px; +} +div#header p, div#header h2 { + margin: 5px; +} +div#sidebar { + width: 200px; + float:left; + margin:5px; + padding: 8px; + text-align: center; + background-color: #ffffff; + box-shadow: 2px 2px 10px #888888; + border-radius: 5px; +} +img#logo { + max-width: 200px; +} +table { + table-layout: fixed; + width: 100%; +} +div#sidebar table { + border-collapse: collapse; +} +div.citeblock table td:first-child + td a { + justify-content: flex-end; +} +table a { + display: flex; + padding: 3px; + background-color: #dddddd; + border-radius: 5px; + text-decoration: none; +} +div#sidebar table a { + justify-content: center; +} +table a:hover { + background-color: #cccccc; +} +div#sidebar table td { + padding: 0px; margin: 3px 0; + border-bottom: 8px solid transparent; +} +div#content { + position: absolute; + right: 0px; + left: 226px; + max-width: 564px; + margin: 5px; +} +div.contentblock { + background-color: #ffffff; + box-shadow: 2px 2px 10px #888888; + margin-bottom: 5px; + padding: 10px; + width: auto; + border-radius: 5px; +} +div.contentblock h3 { + margin: 0.3em 0; +} +a.phantom { + color: #cc2200; +} +div.citeblock a.phantom { + font-style: italic; +} +span.signature { + text-align: right; +} @media only screen and (max-width: 816px) { - div#wrapper { padding: 5px; } - div#header { max-width: 554; margin: 0 auto; } - div#sidebar { max-width: 548; width: inherit; float: inherit; - margin: 5px auto; } - div#content { max-width: 564; position: static; right: inherit; - margin: 5px auto; } - img#logo { max-width: inherit; width: 100%; } + div#wrapper { + padding: 5px; + } + div#header { + max-width: 554; + margin: 0 auto; + } + div#sidebar { + max-width: 548; + width: inherit; + float: inherit; + margin: 5px auto; + } + div#content { + max-width: 564; + position: static; + right: inherit; + margin: 5px auto; + } + img#logo { + max-width: inherit; + width: 100%; + } } \ No newline at end of file From d6d145e7c5b545ab40f6369f6f58a7891bb42c70 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sat, 18 May 2019 14:42:34 -0700 Subject: [PATCH 36/40] Fix tag order --- lexipython/article.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lexipython/article.py 
b/lexipython/article.py index 4490809..367a12d 100644 --- a/lexipython/article.py +++ b/lexipython/article.py @@ -304,6 +304,6 @@ class LexiconArticle: next_link = ("Next →".format( next_article) if next_article is not None else "") - content += "\n\n\n
    {}{}
    \n".format( + content += "\n\n\n
    {}{}
    \n".format( prev_link, next_link) return content From 9a15fee7075880d388200c6978228955a49341ea Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Sat, 18 May 2019 14:53:58 -0700 Subject: [PATCH 37/40] Add links to editor and full page in Session --- lexipython/build.py | 15 +++++++++++---- lexipython/resources/lexicon.css | 3 +++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/lexipython/build.py b/lexipython/build.py index 9fc2270..4bbce95 100644 --- a/lexipython/build.py +++ b/lexipython/build.py @@ -173,12 +173,19 @@ def build_formatting_page(page): # Fill in the entry skeleton return page.format(title="Formatting", content=content) -def build_session_page(page, session_content): +def build_session_page(page, config): """ Builds the full HTML of the session page. """ - # Fill in the entry skeleton - content = "
    {}
    ".format(session_content) + # Misc links + content = '\n' + # Session content + content += "
    {}
    ".format(config["SESSION_PAGE"]) + return page.format(title="Session", content=content) def build_statistics_page(config, page, articles): @@ -450,7 +457,7 @@ def build_all(path_prefix, lexicon_name): f.write(build_formatting_page(page)) print(" Wrote Formatting") with open(pathto("session", "index.html"), "w", encoding="utf-8", newline='') as f: - f.write(build_session_page(page, config["SESSION_PAGE"])) + f.write(build_session_page(page, config)) print(" Wrote Session") with open(pathto("statistics", "index.html"), "w", encoding="utf-8", newline='') as f: f.write(build_statistics_page(config, page, articles)) diff --git a/lexipython/resources/lexicon.css b/lexipython/resources/lexicon.css index 988eeb4..e79734d 100644 --- a/lexipython/resources/lexicon.css +++ b/lexipython/resources/lexicon.css @@ -43,6 +43,9 @@ div#sidebar table { div.citeblock table td:first-child + td a { justify-content: flex-end; } +div.misclinks table td a { + justify-content: center; +} table a { display: flex; padding: 3px; From 31f401400cec7d7aa7306e84e116b8f939739c64 Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Mon, 20 May 2019 18:47:01 -0700 Subject: [PATCH 38/40] Fix sort type in redirect --- lexipython/build.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lexipython/build.py b/lexipython/build.py index 4bbce95..02c6aa5 100644 --- a/lexipython/build.py +++ b/lexipython/build.py @@ -426,7 +426,7 @@ def build_all(path_prefix, lexicon_name): print("Writing redirect page...") with open(pathto("index.html"), "w", encoding="utf8", newline='') as f: f.write(utils.load_resource("redirect.html").format( - lexicon=config["LEXICON_TITLE"], sort=config["DEFAULT_SORT"])) + lexicon=config["LEXICON_TITLE"], sort=parse_sort_type(config["DEFAULT_SORT"]))) # Write the article pages print("Deleting old article pages...") @@ -464,10 +464,10 @@ def build_all(path_prefix, lexicon_name): print(" Wrote Statistics") # Write auxiliary pages - if "PRINTABLE_FILE" in config and config["PRINTABLE_FILE"]: - with open(pathto(config["PRINTABLE_FILE"]), "w", encoding="utf-8", newline='') as f: + if "SEARCHABLE_FILE" in config and config["SEARCHABLE_FILE"]: + with open(pathto(config["SEARCHABLE_FILE"]), "w", encoding="utf-8", newline='') as f: f.write(build_compiled_page(articles, config)) - print(" Wrote compiled page to " + config["PRINTABLE_FILE"]) + print(" Wrote compiled page to " + config["SEARCHABLE_FILE"]) with open(pathto("editor.html"), "w", encoding="utf-8", newline='') as f: editor = utils.load_resource("editor.html") From 699dce47af028271c67e4040685a67dcef68aaec Mon Sep 17 00:00:00 2001 From: Tim Van Baak Date: Mon, 20 May 2019 20:47:39 -0700 Subject: [PATCH 39/40] Add missing addendum config check --- lexipython/article.py | 17 ++++++++++++----- lexipython/build.py | 2 +- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/lexipython/article.py b/lexipython/article.py index 367a12d..7e1e94c 100644 --- a/lexipython/article.py +++ b/lexipython/article.py @@ -167,7 +167,7 @@ class LexiconArticle: return articles @staticmethod - def interlink(lexicon_articles): + def interlink(lexicon_articles, config): """ Fills out fields on articles that require other articles for context. Creates phantom articles. 
@@ -175,10 +175,17 @@ class LexiconArticle:
 		# Preliminary assertion that title/turn is unique
 		keys = set()
 		for article in lexicon_articles:
-			if (article.title, article.turn) in keys:
-				raise ValueError("Found two articles with title '{}' and turn '{}'".format(
-					article.title, article.turn))
-			keys.add((article.title, article.turn))
+			if config['ALLOW_ADDENDA'].lower() == "true":
+				key = (article.title, article.turn)
+				if key in keys:
+					raise ValueError("Found two articles with title '{}' and turn '{}'".format(
+						*key))
+			else:
+				key = article.title
+				if key in keys:
+					raise ValueError("Found two articles with title '{}'".format(
+						article.title))
+			keys.add(key)
 		# Sort out which articles are addendums and which titles are phantoms
 		written_titles = set()
 		cited_titles = set()
diff --git a/lexipython/build.py b/lexipython/build.py
index 02c6aa5..9c15023 100644
--- a/lexipython/build.py
+++ b/lexipython/build.py
@@ -416,7 +416,7 @@ def build_all(path_prefix, lexicon_name):
 	# Once they've been populated, the articles list has the titles of all articles
 	# Sort this by turn before title so prev/next links run in turn order
 	articles = sorted(
-		LexiconArticle.interlink(articles),
+		LexiconArticle.interlink(articles, config),
 		key=lambda a: (a.turn, utils.titlesort(a.title)))
 
 	def pathto(*els):

From e78787a5cff67b91d1d4bde5f4de53ca90ad74ac Mon Sep 17 00:00:00 2001
From: Tim Van Baak
Date: Tue, 21 May 2019 00:53:41 -0700
Subject: [PATCH 40/40] Documentation updates

---
 LEXIPYTHON.md | 57 +++++++++++++++++++++++++++++++++++++++++++++------
 README.md | 28 +++++++++++++++----------
 2 files changed, 68 insertions(+), 17 deletions(-)

diff --git a/LEXIPYTHON.md b/LEXIPYTHON.md
index ea9b0b4..1ed6a48 100644
--- a/LEXIPYTHON.md
+++ b/LEXIPYTHON.md
@@ -8,19 +8,64 @@ To play Lexicon, all you really need is something for the scholars to write thei
 To aid in playing Lexicon, Lexipython **does** provide the following:
 * Specialized markdown parsing into formatted Lexicon entries.
+* An editor with a live preview of the parsed result and a download button.
 * HTML page generation and interlinking.
-* Handy help pages for rules, session information, statistics, and more.
+* Handy help pages for rules, markdown formatting, session information, and statistics.
 
 Lexipython **does not** provide:
-* Web hosting for the Lexicon pages. The Editor should have a plan for distributing new editions of the Lexicon.
+* Programmatic article submission. The current version of Lexipython does not involve a persistent server process. Players must send their articles to the Editor themselves.
+* Web hosting for the Lexicon pages. The Editor should have a plan for distributing new editions of the Lexicon, such as GitHub Pages.
 * Checks for factual consistency between submitted articles. The Editor is responsible for ensuring scholarly rigor.
 
 ## Using Lexipython
 
-To run a game of Lexicon with Lexipython, clone this repository out to a new folder:
+To run a game of Lexicon with Lexipython, use [git](https://git-scm.com/) to clone this repository out to a new folder.
 ```
-$ git clone https://github.com/Jaculabilis/Lexipython.git [name]
+$ git clone https://github.com/Jaculabilis/Lexipython.git
 ```
-Steps for setup:
-1. [WIP]
+Lexipython requires [Python 3](https://www.python.org/downloads/). It will run with only the Python 3 standard library installed, but pagerank statistics will be unavailable without `networkx` installed.
+```
+$ pip install --user networkx
+```
+
+When you have the necessary software installed, open a terminal in the Lexipython directory. You can view the usage of the program with
+```
+$ python lexipython -h
+usage: lexipython [-h] [name] [command]
+
+Lexipython is a Python application for playing the Lexicon RPG.
+
+positional arguments:
+  name        The name of the Lexicon to operate on
+  command     The operation to perform on the Lexicon
+
+optional arguments:
+  -h, --help  show this help message and exit
+
+Run lexipython.py without arguments to list the extant Lexicons.
+
+Available commands:
+
+    init        Create a Lexicon with the provided name
+    build       Build the Lexicon, then exit
+    run         Launch a persistent server managing the Lexicon
+```
+
+Your lexicons are stored in the `lexicon/` folder. Run `python lexipython` to see the status of all lexicons. Except I haven't implemented that yet. Ignore that bit. If you run `python lexipython [name]`, you'll get the status of the named lexicon. That also hasn't been implemented. Whoops!
+
+To create a lexicon, run `python lexipython [name] init` with the name of the lexicon. A folder will be created in `lexicon/` with the given name and some default files will be copied in. You'll need to add a logo image to the folder and edit the config. As players submit articles, place the .txt files in `lexicon/[name]/src/`.
+
+When you finish your initial edits to the config and whenever you want to update the generated HTML files, run `python lexipython [name] build`. Lexipython will regenerate the article pages under `lexicon/[name]/article/` as well as the contents, formatting, rules, session, and statistics pages, and the editor.
+
+To publish the pages, simply copy the lexicon's folder to wherever you're hosting the static files. If you wish, you can leave out the `src/` directory and the status and cfg files. They're not navigable from the public-facing pages.
+
+The `run` command isn't implemented yet either, and to be honest that probably isn't how you're supposed to implement it in the first place. Ignore it for now.
+
+## Configuring a lexicon
+
+[`lexicon.cfg`](lexipython/resources/lexicon.cfg) contains comments explaining the various config options. `PROMPT` and `SESSION_PAGE` should be written as raw HTML, and will be inserted directly into the page. If you wish to use the Addendums rule explained in the main readme, set `ALLOW_ADDENDA` to `True`. If `SEARCHABLE_FILE` is defined, then the Session page will link to a file with all the articles on one page.
+
+## Other notes
+
+At the end of the build, Lexipython will check for players citing themselves. The program does not fault on these checks, because players may be writing articles as Ersatz Scrivener, or otherwise allowed to cite themselves. Watch out for any unexpected output here.
\ No newline at end of file
diff --git a/README.md b/README.md
index 269cacf..4f37037 100644
--- a/README.md
+++ b/README.md
@@ -8,11 +8,11 @@ In Lexicon, each player takes on the role of a scholar. You are cranky, opiniona
 
-1. Each Lexicon has a _topic statement_ that sets the tone for the game. It provides a starting point for shaping the developing world of the Lexicon. As it is a starting point, don't feel contrained to write only about the topics mentioned directly in it.
+1. Each Lexicon has a **topic statement** that sets the tone for the game. It provides a starting point for shaping the developing world of the Lexicon. As it is a starting point, don't feel constrained to write only about the topics mentioned directly in it.
 
-1. Articles are sorted under an _index_, a grouping of letters. An article is in an index if its first letter is in that group of letters. "The", "A", and "An" aren't counted in indexing. _Example: One of the indices is JKL. An article titled 'The Jabberwock' would index under JKL, not T's index._
+1. Articles are sorted under an **index**, a grouping of letters. An article is in an index if its first letter is in that group of letters. "The", "A", and "An" aren't counted in indexing. _Example: Two indices are JKL and TUV. An article titled 'The Jabberwock' would index under JKL, not TUV._
 
-    1. Until the game is over, some of the articles will have been cited, but not yet written. These are called _phantom_ articles. A phantom article has a title, which is defined by the first citation to it, but no content.
+    1. Until the game is over, some of the articles will have been cited, but not yet written. These are called **phantom articles**. A phantom article has a title, which is defined by the first citation to it, but no content.
 
     1. Generally, an index has a number of "slots" equal to the number of players. When an article is first written or cited, it takes up one slot in its corresponding index.
 
@@ -26,7 +26,9 @@ In Lexicon, each player takes on the role of a scholar. You are cranky, opiniona
 
     1. There are no hard and fast rules about length, but it is recommended that the Editor enforce a maximum word limit. In general, aiming for 200-300 words is ideal.
 
-    1. You must respect and not contradict the factual content of all written articles. You may introduce new facts that put things in a new light, provide alternative interpretations, or flesh out unexplained details in unexpected ways; but you must not _contradict_ what has been previously established as fact. Use the "yes, and" rule from improv acting: accept what your fellow scholars have written and add to it in new ways, rather than trying to undo their work. This rule includes facts that have been established in written articles about the topics of phantom articles.
+    1. You must respect and not contradict the factual content of all written articles. You may introduce new facts that put things in a new light, provide alternative interpretations, or flesh out unexplained details in unexpected ways; but you must not _contradict_ what has been previously established as fact. Use the "yes, and" rule from improv acting: accept what your fellow scholars have written and add to it in new ways, rather than trying to undo their work.
+
+    1. This rule includes facts that have been established in other, written articles about the topics of phantom articles. When you set out to write a phantom article, be sure to check what's been said about the topic already. Lexipython will list the articles that have cited your article.
@@ -34,16 +36,18 @@ In Lexicon, each player takes on the role of a scholar. You are cranky, opiniona
 
 1. Each article will cite other articles in the Lexicon.
 
     1. As a corollary, you may not write phantom articles that you have cited. If you cite an article and then write it later, your former article now cites you, which is forbidden per the above.
 
-    1. On the first turn, there are no written articles. Your first article must cite _exactly two_ phantom articles.
+    1. On the first turn, there are no written articles. Your first article must cite **exactly two** phantom articles.
 
-    1. On subsequent turns, your article must cite _exactly two_ phantoms, but you can cite phantoms that already exist. Your article must also cite _at least one_ written article. You can cite more than one.
+    1. On subsequent turns, your article must cite **exactly two** phantoms, but you can cite phantoms that already exist. Your article must also cite **at least one** written article. You can cite more than one.
 
-    1. On the penultimate turn, you must cite _exactly one_ phantom article and _at least two_ written articles.
+    1. On the penultimate turn, you must cite **exactly one** phantom article and **at least two** written articles.
 
-    1. On the final turn, you must cite _at least three_ written articles.
+    1. On the final turn, you must cite **at least three** written articles.
 
 1. As the game goes on, it may come to pass that a player must write an article in an index, but that index is full, and that player has already cited all the phantoms in it. When this happens, the player instead writes their article as **Ersatz Scrivener**, radical skeptic. Ersatz does not believe in the existence of whatever he is writing about, no matter how obvious it seems to others or how central it is in the developing history of the world. For Ersatz, all references, testimony, etc. with regard to its existence are tragic delusion at best or malicious lies at worst. Unlike the other scholars, Ersatz does not treat the research of his peers as fact, because he does not believe he has peers. Players writing articles as Ersatz are encouraged to lambast the amateur work of his misguided "collaborators".
 
+1. Finally, the rules are always subject to the discretion of the Editor.
+
 ## Procedural Rules: Running the Game
 
 ### The Editor
 
@@ -60,7 +64,7 @@ The player running the game is the Editor. The Editor should handle the followin
 
 * **Topic statement.** The topic statement should be vague, but give the players some hooks to begin writing. Examples: "You are all revisionist scholars from the Paleotechnic Era arguing about how the Void Ghost Rebellion led to the overthrow of the cyber-gnostic theocracy and the establishment of the Third Republic"; "In the wake of the Quartile Reformation, you are scholars investigating the influence of Remigrationism on the Disquietists". What happened to the first two Republics or what Remigrationism is are left open for the players to determine.
 
-* **Indices and turns.** In general, the Editor will decide on a number of turns and divide the alphabet into that many indices. Each player then takes one turn in each index. A game of 6 or 8 turns is suggested. _Example: An 8-turn game over the indices ABC/DEF/GHI/JKL/MNO/PQRS/TUV/QXYZ._ The Editor should determine how much time the players can devote to playing Lexicon and set a time limit on turns accordingly.
+* **Indices and turns.** In general, the Editor will decide on a number of turns and divide the alphabet into that many indices. Each player then takes one turn in each index. A game of 6 or 8 turns is suggested. _Example: An 8-turn game over the indices ABC/DEF/GHI/JKL/MNO/PQRS/TUV/WXYZ._ The Editor should determine how much time the players can devote to playing Lexicon and set a time limit on turns accordingly.
 
 * **Index assignments.** Each turn, the Editor should assign each player to an index. Unless players have a method of coordinating who is writing what article, it is suggested that the Editor always assign players to write in different indices. The easiest way to do this is to distribute players randomly across the indices for the first turn, then move them through the indices in order, wrapping around to the top from the bottom.
 
@@ -74,7 +78,7 @@ How the game develops is entirely up to the players, and your group may have a d
 
 * Even if articles don't get too long, having too many articles on one subject can lead to the same problem of writing on the topic becoming too hard to do consistently. Avoid having multiple articles about the same thing, and avoid having too many articles about different facets of one particular element of the world.
 
-* Encyclopedias are written about things in the past. Players may, of course, want to mention how something in the past still affects the world in the present day. However, if players begin to write about purely contemporary things or events, the Lexicon shifts from an _encyclopedic_ work to a _narrative_ one. If that's what you want out of the game, go ahead and do so, but writing about an ongoing narrative insead of settled history introduce the additional complication of keeping abreast of the current state of the plot. It is more difficult for players to avoid contradiction when the facts are changing as they write.
+* Encyclopedias are written about things in the past. Players may, of course, want to mention how something in the past still affects the world in the present day. However, if players begin to write about purely contemporary things or events, the Lexicon shifts from an _encyclopedic_ work to a _narrative_ one. If that's what you want out of the game, go ahead and do so, but writing about an ongoing narrative instead of settled history introduces the additional complication of keeping abreast of the current state of the plot. It is more difficult for players to avoid contradiction when the facts are changing as they write.
 
 * Articles whose titles do not begin with a character in any index pattern are sorted to the "&c" index. This usually includes numbers and symbols. If the Editor wants to make purposive use of this, they can assign players to it as an index.
 
@@ -82,7 +86,9 @@ How the game develops is entirely up to the players, and your group may have a d
 
 The Editor is always free to alter the game procedures when it would make for a better game. The following are some known rule variations:
 
-* **Follow the Phantoms:** Players make two phantom citations on the first turn. On subsequent turns, rather than choosing from phantoms and open slots in an assigned index, players must write an existing phantom. Until all slots are full, players must make one of their phantom citations to a new phantom article and one to an existing phantom.
+* **Follow the Phantoms:** Players make phantom citations as normal on the first turn. On subsequent turns, rather than choosing from phantoms and open slots in an assigned index, players must write an existing phantom. Until all slots are full, players must make one of their phantom citations to a new phantom article and one to an existing phantom.
+
+* **Addendums:** In addition to writing new and phantom articles, players can write articles with the same title as an already-written article. The content of these "addendum" articles is added as a postscript at the bottom of the first article written under that title. Addendums can legally cite what their author can cite, not what the main article's author can cite.
 
 * Occasionally, if more players make a citation to an index than there are open slots, the index will be over capacity. If the Editor is assigning players to indices in order, the Editor may need to shift players' index assignments around. This may also be useful for decreasing the number of Ersatz articles, if a player can't write in their assigned index but could write in another.
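
The LEXIPYTHON.md changes above describe setup, configuration, and publishing in prose. As a compact illustration of that workflow, the following sketch strings the documented commands together in order; the lexicon name `mylexicon` is a placeholder, and the `networkx` install is optional (it only enables the pagerank statistics).
```
$ git clone https://github.com/Jaculabilis/Lexipython.git
$ cd Lexipython
$ pip install --user networkx          # optional: enables pagerank statistics
$ python lexipython mylexicon init     # creates lexicon/mylexicon/ with default files
$ # edit lexicon/mylexicon/lexicon.cfg and add a logo image
$ # place submitted articles in lexicon/mylexicon/src/ as .txt files
$ python lexipython mylexicon build    # regenerates lexicon/mylexicon/article/ and the help pages
$ # publish by copying lexicon/mylexicon/ (optionally without src/ and the cfg) to your static host
```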
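Patch 39 above ties the duplicate-title check to the `ALLOW_ADDENDA` option. The following is a minimal standalone sketch of that rule, not the project's actual API, assuming articles expose `title` and `turn` attributes and `config` is the parsed `lexicon.cfg` dictionary with a string-valued `ALLOW_ADDENDA` entry.
```
def assert_unique_keys(articles, config):
	# With addendums enabled, several articles may share a title as long as
	# they come from different turns; otherwise every title must be unique.
	allow_addenda = config['ALLOW_ADDENDA'].lower() == "true"
	keys = set()
	for article in articles:
		key = (article.title, article.turn) if allow_addenda else article.title
		if key in keys:
			raise ValueError("Found two articles with key {!r}".format(key))
		keys.add(key)
```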