Started on documentation
[szyfrow.git] / docs / szyfrow / support / language_models.html
diff --git a/docs/szyfrow/support/language_models.html b/docs/szyfrow/support/language_models.html
new file mode 100644 (file)
index 0000000..765e22c
--- /dev/null
@@ -0,0 +1,637 @@
+<!doctype html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
+<meta name="generator" content="pdoc 0.9.2" />
+<title>szyfrow.support.language_models API documentation</title>
+<meta name="description" content="Descriptive models of a natural language (in this case, English) …" />
+<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/sanitize.min.css" integrity="sha256-PK9q560IAAa6WVRRh76LtCaI8pjTJ2z11v0miyNNjrs=" crossorigin>
+<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/typography.min.css" integrity="sha256-7l/o7C8jubJiy74VsKTidCy1yBkRtiUGbVkYBylBqUg=" crossorigin>
+<link rel="stylesheet preload" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/github.min.css" crossorigin>
+<style>:root{--highlight-color:#fe9}.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}#sidebar > *:last-child{margin-bottom:2cm}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}h1:target,h2:target,h3:target,h4:target,h5:target,h6:target{background:var(--highlight-color);padding:.2em 0}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{margin-top:.6em;font-weight:bold}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}dt:target .name{background:var(--highlight-color)}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}td{padding:0 .5em}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
+<style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%;height:100vh;overflow:auto;position:sticky;top:0}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
+<style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
+<script defer src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js" integrity="sha256-Uv3H6lx7dJmRfRvH8TH6kJD1TSK1aFcwgx+mdg3epi8=" crossorigin></script>
+<script>window.addEventListener('DOMContentLoaded', () => hljs.initHighlighting())</script>
+</head>
+<body>
+<main>
+<article id="content">
+<header>
+<h1 class="title">Module <code>szyfrow.support.language_models</code></h1>
+</header>
+<section id="section-intro">
+<p>Descriptive models of a natural language (in this case, English).</p>
+<p>The functions <code><a title="szyfrow.support.language_models.Pwords" href="#szyfrow.support.language_models.Pwords">Pwords()</a></code>, <code><a title="szyfrow.support.language_models.Pletters" href="#szyfrow.support.language_models.Pletters">Pletters()</a></code>, <code><a title="szyfrow.support.language_models.Pbigrams" href="#szyfrow.support.language_models.Pbigrams">Pbigrams()</a></code>, and <code><a title="szyfrow.support.language_models.Ptrigrams" href="#szyfrow.support.language_models.Ptrigrams">Ptrigrams()</a></code> return the
+log probability of a section of text.</p>
+<p>If you want to use a different language, replace the data files in
+<a href="../language_model_files/index.html"><code>szyfrow/language_model_files</code></a>.</p>
+<ul>
+<li><code>count_1l.txt</code>: counts of single letters</li>
+<li><code>count_2l.txt</code>: counts of pairs of letters (bigrams)</li>
+<li><code>count_3l.txt</code>: counts of triples of letters (trigrams)</li>
+<li><code>words.txt</code>: a dictionary of words, used for keyword-based cipher breaking.
+These words should only contain characters contained in
+<code>string.ascii_letters</code>.</li>
+</ul>
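+<p>A minimal usage sketch (illustrative only; the actual scores depend on the
+bundled count files, and higher, i.e. less negative, scores indicate more
+English-like text):</p>
+<pre><code class="language-python">from szyfrow.support.language_models import Pletters, Pbigrams, Ptrigrams, Pwords
+from szyfrow.support.utilities import sanitise
+
+text = sanitise('the quick brown fox')   # 'thequickbrownfox'
+
+# Each function returns a log10 probability, so scores are typically negative.
+letter_score = Pletters(text)
+bigram_score = Pbigrams(text)
+trigram_score = Ptrigrams(text)
+
+# Pwords scores a sequence of whole words rather than a string of letters.
+word_score = Pwords(['the', 'quick', 'brown', 'fox'])</code></pre>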
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">&#34;&#34;&#34;Descriptive models of a natural language (in this case, English).
+
+The functions `Pwords`, `Pletters`, `Pbigrams`, and `Ptrigrams` return the 
+log probability of a section of text.
+
+If you want to use a different language, replace the data files in 
+[`szyfrow/language_model_files`](../language_model_files/index.html).
+
+* `count_1l.txt`: counts of single letters
+* `count_2l.txt`: counts of pairs of letters (bigrams)
+* `count_3l.txt`: counts of triples of letters (trigrams)
+* `words.txt`: a dictionary of words, used for keyword-based cipher breaking.
+  These words should only contain characters contained in
+  `string.ascii_letters`.
+
+&#34;&#34;&#34;
+
+import string
+import random
+import collections
+import itertools
+from math import log10
+import os 
+import importlib.resources as pkg_resources
+
+import szyfrow.support.norms
+from szyfrow.support.utilities import sanitise, deduplicate
+from szyfrow import language_model_files
+
+
+def datafile(name, sep=&#39;\t&#39;):
+    &#34;&#34;&#34;Read key,value pairs from file.
+    &#34;&#34;&#34;
+    with pkg_resources.open_text(language_model_files, name) as f:
+    # with open(p name), &#39;r&#39;) as f:
+        for line in f:
+            splits = line.split(sep)
+            yield [splits[0], int(splits[1])]
+
+english_counts = collections.Counter(dict(datafile(&#39;count_1l.txt&#39;)))
+&#34;&#34;&#34;Counts of single letters in English.&#34;&#34;&#34;
+normalised_english_counts = szyfrow.support.norms.normalise(english_counts)
+&#34;&#34;&#34;Normalised counts of single letters in English (the sum of all counts
+adds to 1).&#34;&#34;&#34;
+
+english_bigram_counts = collections.Counter(dict(datafile(&#39;count_2l.txt&#39;)))
+&#34;&#34;&#34;Counts of letter bigrams in English.&#34;&#34;&#34;
+normalised_english_bigram_counts = szyfrow.support.norms.normalise(english_bigram_counts)
+&#34;&#34;&#34;Normalised counts of letter bigrams in English (the sum of all counts
+adds to 1).&#34;&#34;&#34;
+
+english_trigram_counts = collections.Counter(dict(datafile(&#39;count_3l.txt&#39;)))
+&#34;&#34;&#34;Counts of letter trigrams in English.&#34;&#34;&#34;
+normalised_english_trigram_counts = szyfrow.support.norms.normalise(english_trigram_counts)
+&#34;&#34;&#34;Normalised counts of letter trigrams in English (the sum of all counts
+adds to 1).&#34;&#34;&#34;
+
+keywords = []
+&#34;&#34;&#34;A sample list of keywords, to act as a dictionary for 
+dictionary-based cipher breaking attempts.&#34;&#34;&#34;
+with pkg_resources.open_text(language_model_files, &#39;words.txt&#39;) as f:
+    keywords = [line.rstrip() for line in f]
+
+
+def transpositions_of(keyword):
+    &#34;&#34;&#34;Finds the transpositions given by a keyword. For instance, the keyword
+    &#39;clever&#39; deduplicates to &#39;clevr&#39;, which sorts to &#39;celrv&#39;, so the first
+    column (0) stays first, the second column (1) moves to third, the third
+    column (2) moves to second, and so on.
+
+    If passed a tuple, assume it&#39;s already a transposition and just return it.
+
+    &gt;&gt;&gt; transpositions_of(&#39;clever&#39;)
+    (0, 2, 1, 4, 3)
+    &gt;&gt;&gt; transpositions_of(&#39;fred&#39;)
+    (3, 2, 0, 1)
+    &gt;&gt;&gt; transpositions_of((3, 2, 0, 1))
+    (3, 2, 0, 1)
+    &#34;&#34;&#34;
+    if isinstance(keyword, tuple):
+        return keyword
+    else:
+        key = deduplicate(keyword)
+        transpositions = tuple(key.index(l) for l in sorted(key))
+        return transpositions
+
+transpositions = collections.defaultdict(list)
+&#34;&#34;&#34;A sample dict of transpositions, to act as a dictionary for 
+dictionary-based cipher breaking attempts. Each key is a transposition, 
+each value is a list of words that give that transposition.&#34;&#34;&#34;
+for word in keywords:
+    transpositions[transpositions_of(word)] += [word]
+
+
+def weighted_choice(d):
+    &#34;&#34;&#34;Generate a random item from a dictionary of item counts
+    &#34;&#34;&#34;
+    delems, dweights = list(zip(*d.items()))
+    return random.choices(delems, dweights)[0] 
+    # target = random.uniform(0, sum(d.values()))
+    # cuml = 0.0
+    # for (l, p) in d.items():
+    #     cuml += p
+    #     if cuml &gt; target:
+    #         return l
+    # return None
+
+def random_english_letter():
+    &#34;&#34;&#34;Generate a random letter based on English letter counts
+    &#34;&#34;&#34;
+    return weighted_choice(normalised_english_counts)
+
+
+def ngrams(text, n):
+    &#34;&#34;&#34;Returns all n-grams of a text
+    
+    &gt;&gt;&gt; ngrams(sanitise(&#39;the quick brown fox&#39;), 2) # doctest: +NORMALIZE_WHITESPACE
+    [&#39;th&#39;, &#39;he&#39;, &#39;eq&#39;, &#39;qu&#39;, &#39;ui&#39;, &#39;ic&#39;, &#39;ck&#39;, &#39;kb&#39;, &#39;br&#39;, &#39;ro&#39;, &#39;ow&#39;, &#39;wn&#39;, 
+     &#39;nf&#39;, &#39;fo&#39;, &#39;ox&#39;]
+    &gt;&gt;&gt; ngrams(sanitise(&#39;the quick brown fox&#39;), 4) # doctest: +NORMALIZE_WHITESPACE
+    [&#39;theq&#39;, &#39;hequ&#39;, &#39;equi&#39;, &#39;quic&#39;, &#39;uick&#39;, &#39;ickb&#39;, &#39;ckbr&#39;, &#39;kbro&#39;, &#39;brow&#39;, 
+     &#39;rown&#39;, &#39;ownf&#39;, &#39;wnfo&#39;, &#39;nfox&#39;]
+    &#34;&#34;&#34;
+    return [text[i:i+n] for i in range(len(text)-n+1)]
+
+
+class Pdist(dict):
+    &#34;&#34;&#34;A probability distribution estimated from counts in datafile.
+    Values are stored and returned as log probabilities.
+    &#34;&#34;&#34;
+    def __init__(self, data=[], estimate_of_missing=None):
+        data1, data2 = itertools.tee(data)
+        self.total = sum([d[1] for d in data1])
+        for key, count in data2:
+            self[key] = log10(count / self.total)
+        self.estimate_of_missing = estimate_of_missing or (lambda k, N: 1./N)
+    def __missing__(self, key):
+        return self.estimate_of_missing(key, self.total)
+
+def log_probability_of_unknown_word(key, N):
+    &#34;&#34;&#34;Estimate the log probability of an unknown word.
+    &#34;&#34;&#34;
+    return -log10(N * 10**((len(key) - 2) * 1.4))
+
+Pw = Pdist(datafile(&#39;count_1w.txt&#39;), log_probability_of_unknown_word)
+&#34;&#34;&#34;A [Pdist](#szyfrow.support.language_models.Pdist) holding log probabilities 
+of words. Unknown words have their log probability estimated by
+[log_probability_of_unknown_word](#szyfrow.support.language_models.log_probability_of_unknown_word)&#34;&#34;&#34;
+Pl = Pdist(datafile(&#39;count_1l.txt&#39;), lambda _k, _N: 0)
+&#34;&#34;&#34;A [Pdist](#szyfrow.support.language_models.Pdist) holding log probabilities 
+of single letters. Unknown letters have their log probability estimated as zero.&#34;&#34;&#34;
+P2l = Pdist(datafile(&#39;count_2l.txt&#39;), lambda _k, _N: 0)
+&#34;&#34;&#34;A [Pdist](#szyfrow.support.language_models.Pdist) holding log probabilities 
+of letter bigrams. Unknown bigrams have their log probability estimated as zero.&#34;&#34;&#34;
+P3l = Pdist(datafile(&#39;count_3l.txt&#39;), lambda _k, _N: 0)
+&#34;&#34;&#34;A [Pdist](#szyfrow.support.language_models.Pdist) holding log probabilities 
+of letter trigrams. Unknown trigrams have their log probability estimated as zero.&#34;&#34;&#34;
+
+def Pwords(words): 
+    &#34;&#34;&#34;The Naive Bayes log probability of a sequence of words.
+    &#34;&#34;&#34;
+    return sum(Pw[w.lower()] for w in words)
+
+def Pletters(letters):
+    &#34;&#34;&#34;The Naive Bayes log probability of a sequence of letters.
+    &#34;&#34;&#34;
+    return sum(Pl[l.lower()] for l in letters)
+
+def Pbigrams(letters):
+    &#34;&#34;&#34;The Naive Bayes log probability of the bigrams formed from a sequence 
+    of letters.
+    &#34;&#34;&#34;
+    return sum(P2l[p] for p in ngrams(letters, 2))
+
+def Ptrigrams(letters):
+    &#34;&#34;&#34;The Naive Bayes log probability of the trigrams formed from a sequence
+    of letters.
+    &#34;&#34;&#34;
+    return sum(P3l[p] for p in ngrams(letters, 3))
+
+
+def cosine_distance_score(text):
+    &#34;&#34;&#34;Finds the dissimilarity of a text to English, using the cosine distance
+    of the frequency distribution.
+
+    &gt;&gt;&gt; cosine_distance_score(&#39;abcabc&#39;) # doctest: +ELLIPSIS
+    0.73771...
+    &#34;&#34;&#34;
+    # return szyfrow.support.norms.cosine_distance(english_counts, 
+    #     collections.Counter(sanitise(text)))
+    return 1 - szyfrow.support.norms.cosine_similarity(english_counts, 
+        collections.Counter(sanitise(text)))
+
+
+if __name__ == &#34;__main__&#34;:
+    import doctest
+    doctest.testmod()</code></pre>
+</details>
+</section>
+<section>
+</section>
+<section>
+<h2 class="section-title" id="header-variables">Global variables</h2>
+<dl>
+<dt id="szyfrow.support.language_models.P2l"><code class="name">var <span class="ident">P2l</span></code></dt>
+<dd>
+<div class="desc"><p>A <a href="#szyfrow.support.language_models.Pdist">Pdist</a> holding log probabilities
+of letter bigrams. Unknown bigrams have their log probability estimated as zero.</p></div>
+</dd>
+<dt id="szyfrow.support.language_models.P3l"><code class="name">var <span class="ident">P3l</span></code></dt>
+<dd>
+<div class="desc"><p>A <a href="#szyfrow.support.language_models.Pdist">Pdist</a> holding log probabilities
+of letter trigrams. Unknown trigrams have their log probability estimated as zero.</p></div>
+</dd>
+<dt id="szyfrow.support.language_models.Pl"><code class="name">var <span class="ident">Pl</span></code></dt>
+<dd>
+<div class="desc"><p>A <a href="#szyfrow.support.language_models.Pdist">Pdist</a> holding log probabilities
+of single letters. Unknown letters have their log probability estimated as zero.</p></div>
+</dd>
+<dt id="szyfrow.support.language_models.Pw"><code class="name">var <span class="ident">Pw</span></code></dt>
+<dd>
+<div class="desc"><p>A <a href="#szyfrow.support.language_models.Pdist">Pdist</a> holding log probabilities
+of words. Unknown words have their log probability estimated by
+<a href="#szyfrow.support.language_models.log_probability_of_unknown_word">log_probability_of_unknown_word</a>.</p></div>
+</dd>
+<dt id="szyfrow.support.language_models.english_bigram_counts"><code class="name">var <span class="ident">english_bigram_counts</span></code></dt>
+<dd>
+<div class="desc"><p>Counts of letter bigrams in English.</p></div>
+</dd>
+<dt id="szyfrow.support.language_models.english_counts"><code class="name">var <span class="ident">english_counts</span></code></dt>
+<dd>
+<div class="desc"><p>Counts of single letters in English.</p></div>
+</dd>
+<dt id="szyfrow.support.language_models.english_trigram_counts"><code class="name">var <span class="ident">english_trigram_counts</span></code></dt>
+<dd>
+<div class="desc"><p>Counts of letter trigrams in English.</p></div>
+</dd>
+<dt id="szyfrow.support.language_models.keywords"><code class="name">var <span class="ident">keywords</span></code></dt>
+<dd>
+<div class="desc"><p>A sample list of keywords, to act as a dictionary for
+dictionary-based cipher breaking attempts.</p></div>
+</dd>
+<dt id="szyfrow.support.language_models.normalised_english_bigram_counts"><code class="name">var <span class="ident">normalised_english_bigram_counts</span></code></dt>
+<dd>
+<div class="desc"><p>Normalised counts of letter bigrams in English (the sum of all counts
+adds to 1).</p></div>
+</dd>
+<dt id="szyfrow.support.language_models.normalised_english_counts"><code class="name">var <span class="ident">normalised_english_counts</span></code></dt>
+<dd>
+<div class="desc"><p>Normalised counts of single letters in English (the sum of all counts
+adds to 1).</p></div>
+</dd>
+<dt id="szyfrow.support.language_models.normalised_english_trigram_counts"><code class="name">var <span class="ident">normalised_english_trigram_counts</span></code></dt>
+<dd>
+<div class="desc"><p>Normalised counts of letter trigrams in English (the sum of all counts
+adds to 1).</p></div>
+</dd>
+<dt id="szyfrow.support.language_models.transpositions"><code class="name">var <span class="ident">transpositions</span></code></dt>
+<dd>
+<div class="desc"><p>A sample dict of transpositions, to act as a dictionary for
+dictionary-based cipher breaking attempts. Each key is a transposition,
+each value is a list of words that give that transposition.</p></div>
+</dd>
+</dl>
+</section>
+<section>
+<h2 class="section-title" id="header-functions">Functions</h2>
+<dl>
+<dt id="szyfrow.support.language_models.Pbigrams"><code class="name flex">
+<span>def <span class="ident">Pbigrams</span></span>(<span>letters)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>The Naive Bayes log probability of the bigrams formed from a sequence
+of letters.</p></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def Pbigrams(letters):
+    &#34;&#34;&#34;The Naive Bayes log probability of the bigrams formed from a sequence 
+    of letters.
+    &#34;&#34;&#34;
+    return sum(P2l[p] for p in ngrams(letters, 2))</code></pre>
+</details>
+</dd>
+<dt id="szyfrow.support.language_models.Pletters"><code class="name flex">
+<span>def <span class="ident">Pletters</span></span>(<span>letters)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>The Naive Bayes log probability of a sequence of letters.</p></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def Pletters(letters):
+    &#34;&#34;&#34;The Naive Bayes log probability of a sequence of letters.
+    &#34;&#34;&#34;
+    return sum(Pl[l.lower()] for l in letters)</code></pre>
+</details>
+</dd>
+<dt id="szyfrow.support.language_models.Ptrigrams"><code class="name flex">
+<span>def <span class="ident">Ptrigrams</span></span>(<span>letters)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>The Naive Bayes log probability of the trigrams formed from a sequence
+of letters.</p></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def Ptrigrams(letters):
+    &#34;&#34;&#34;The Naive Bayes log probability of the trigrams formed from a sequence
+    of letters.
+    &#34;&#34;&#34;
+    return sum(P3l[p] for p in ngrams(letters, 3))</code></pre>
+</details>
+</dd>
+<dt id="szyfrow.support.language_models.Pwords"><code class="name flex">
+<span>def <span class="ident">Pwords</span></span>(<span>words)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>The Naive Bayes log probability of a sequence of words.</p></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def Pwords(words): 
+    &#34;&#34;&#34;The Naive Bayes log probability of a sequence of words.
+    &#34;&#34;&#34;
+    return sum(Pw[w.lower()] for w in words)</code></pre>
+</details>
+</dd>
+<dt id="szyfrow.support.language_models.cosine_distance_score"><code class="name flex">
+<span>def <span class="ident">cosine_distance_score</span></span>(<span>text)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Finds the dissimilarity of a text to English, using the cosine distance
+of the frequency distribution.</p>
+<pre><code class="language-python-repl">&gt;&gt;&gt; cosine_distance_score('abcabc') # doctest: +ELLIPSIS
+0.73771...
+</code></pre></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def cosine_distance_score(text):
+    &#34;&#34;&#34;Finds the dissimilarity of a text to English, using the cosine distance
+    of the frequency distribution.
+
+    &gt;&gt;&gt; cosine_distance_score(&#39;abcabc&#39;) # doctest: +ELLIPSIS
+    0.73771...
+    &#34;&#34;&#34;
+    # return szyfrow.support.norms.cosine_distance(english_counts, 
+    #     collections.Counter(sanitise(text)))
+    return 1 - szyfrow.support.norms.cosine_similarity(english_counts, 
+        collections.Counter(sanitise(text)))</code></pre>
+</details>
+</dd>
+<dt id="szyfrow.support.language_models.datafile"><code class="name flex">
+<span>def <span class="ident">datafile</span></span>(<span>name, sep='\t')</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Read key,value pairs from file.</p></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def datafile(name, sep=&#39;\t&#39;):
+    &#34;&#34;&#34;Read key,value pairs from file.
+    &#34;&#34;&#34;
+    with pkg_resources.open_text(language_model_files, name) as f:
+    # with open(p name), &#39;r&#39;) as f:
+        for line in f:
+            splits = line.split(sep)
+            yield [splits[0], int(splits[1])]</code></pre>
+</details>
+</dd>
+<dt id="szyfrow.support.language_models.log_probability_of_unknown_word"><code class="name flex">
+<span>def <span class="ident">log_probability_of_unknown_word</span></span>(<span>key, N)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Estimate the probability of an unknown word.</p></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def log_probability_of_unknown_word(key, N):
+    &#34;&#34;&#34;Estimate the log probability of an unknown word.
+    &#34;&#34;&#34;
+    return -log10(N * 10**((len(key) - 2) * 1.4))</code></pre>
+</details>
+</dd>
+<dt id="szyfrow.support.language_models.ngrams"><code class="name flex">
+<span>def <span class="ident">ngrams</span></span>(<span>text, n)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Returns all n-grams of a text</p>
+<pre><code class="language-python-repl">&gt;&gt;&gt; ngrams(sanitise('the quick brown fox'), 2) # doctest: +NORMALIZE_WHITESPACE
+['th', 'he', 'eq', 'qu', 'ui', 'ic', 'ck', 'kb', 'br', 'ro', 'ow', 'wn', 
+ 'nf', 'fo', 'ox']
+&gt;&gt;&gt; ngrams(sanitise('the quick brown fox'), 4) # doctest: +NORMALIZE_WHITESPACE
+['theq', 'hequ', 'equi', 'quic', 'uick', 'ickb', 'ckbr', 'kbro', 'brow', 
+ 'rown', 'ownf', 'wnfo', 'nfox']
+</code></pre></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def ngrams(text, n):
+    &#34;&#34;&#34;Returns all n-grams of a text
+    
+    &gt;&gt;&gt; ngrams(sanitise(&#39;the quick brown fox&#39;), 2) # doctest: +NORMALIZE_WHITESPACE
+    [&#39;th&#39;, &#39;he&#39;, &#39;eq&#39;, &#39;qu&#39;, &#39;ui&#39;, &#39;ic&#39;, &#39;ck&#39;, &#39;kb&#39;, &#39;br&#39;, &#39;ro&#39;, &#39;ow&#39;, &#39;wn&#39;, 
+     &#39;nf&#39;, &#39;fo&#39;, &#39;ox&#39;]
+    &gt;&gt;&gt; ngrams(sanitise(&#39;the quick brown fox&#39;), 4) # doctest: +NORMALIZE_WHITESPACE
+    [&#39;theq&#39;, &#39;hequ&#39;, &#39;equi&#39;, &#39;quic&#39;, &#39;uick&#39;, &#39;ickb&#39;, &#39;ckbr&#39;, &#39;kbro&#39;, &#39;brow&#39;, 
+     &#39;rown&#39;, &#39;ownf&#39;, &#39;wnfo&#39;, &#39;nfox&#39;]
+    &#34;&#34;&#34;
+    return [text[i:i+n] for i in range(len(text)-n+1)]</code></pre>
+</details>
+</dd>
+<dt id="szyfrow.support.language_models.random_english_letter"><code class="name flex">
+<span>def <span class="ident">random_english_letter</span></span>(<span>)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Generate a random letter based on English letter counts</p></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def random_english_letter():
+    &#34;&#34;&#34;Generate a random letter based on English letter counts
+    &#34;&#34;&#34;
+    return weighted_choice(normalised_english_counts)</code></pre>
+</details>
+</dd>
+<dt id="szyfrow.support.language_models.transpositions_of"><code class="name flex">
+<span>def <span class="ident">transpositions_of</span></span>(<span>keyword)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Finds the transpostions given by a keyword. For instance, the keyword
+'clever' rearranges to 'celrv', so the first column (0) stays first, the
+second column (1) moves to third, the third column (2) moves to second,
+and so on.</p>
+<p>If passed a tuple, assume it's already a transposition and just return it.</p>
+<pre><code class="language-python-repl">&gt;&gt;&gt; transpositions_of('clever')
+(0, 2, 1, 4, 3)
+&gt;&gt;&gt; transpositions_of('fred')
+(3, 2, 0, 1)
+&gt;&gt;&gt; transpositions_of((3, 2, 0, 1))
+(3, 2, 0, 1)
+</code></pre></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def transpositions_of(keyword):
+    &#34;&#34;&#34;Finds the transpositions given by a keyword. For instance, the keyword
+    &#39;clever&#39; deduplicates to &#39;clevr&#39;, which sorts to &#39;celrv&#39;, so the first
+    column (0) stays first, the second column (1) moves to third, the third
+    column (2) moves to second, and so on.
+
+    If passed a tuple, assume it&#39;s already a transposition and just return it.
+
+    &gt;&gt;&gt; transpositions_of(&#39;clever&#39;)
+    (0, 2, 1, 4, 3)
+    &gt;&gt;&gt; transpositions_of(&#39;fred&#39;)
+    (3, 2, 0, 1)
+    &gt;&gt;&gt; transpositions_of((3, 2, 0, 1))
+    (3, 2, 0, 1)
+    &#34;&#34;&#34;
+    if isinstance(keyword, tuple):
+        return keyword
+    else:
+        key = deduplicate(keyword)
+        transpositions = tuple(key.index(l) for l in sorted(key))
+        return transpositions</code></pre>
+</details>
+</dd>
+<dt id="szyfrow.support.language_models.weighted_choice"><code class="name flex">
+<span>def <span class="ident">weighted_choice</span></span>(<span>d)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Generate random item from a dictionary of item counts</p></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def weighted_choice(d):
+    &#34;&#34;&#34;Generate a random item from a dictionary of item counts
+    &#34;&#34;&#34;
+    delems, dweights = list(zip(*d.items()))
+    return random.choices(delems, dweights)[0] 
+    # target = random.uniform(0, sum(d.values()))
+    # cuml = 0.0
+    # for (l, p) in d.items():
+    #     cuml += p
+    #     if cuml &gt; target:
+    #         return l
+    # return None</code></pre>
+</details>
+</dd>
+</dl>
+</section>
+<section>
+<h2 class="section-title" id="header-classes">Classes</h2>
+<dl>
+<dt id="szyfrow.support.language_models.Pdist"><code class="flex name class">
+<span>class <span class="ident">Pdist</span></span>
+<span>(</span><span>data=[], estimate_of_missing=None)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>A probability distribution estimated from counts in datafile.
+Values are stored and returned as log probabilities.</p></div>
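+<p>A small illustrative example with made-up counts (not the bundled data files).
+Note that the default estimator for missing keys returns <code>1/N</code> rather
+than a log probability; the module therefore supplies explicit estimators
+(<code>log_probability_of_unknown_word</code> for <code>Pw</code>, a constant zero
+for <code>Pl</code>, <code>P2l</code> and <code>P3l</code>):</p>
+<pre><code class="language-python-repl">&gt;&gt;&gt; toy = Pdist([('aa', 75), ('bb', 25)])
+&gt;&gt;&gt; round(toy['aa'], 3)   # log10(75 / 100)
+-0.125
+&gt;&gt;&gt; toy['zz'] == 1 / toy.total   # default estimate for a missing key
+True
+</code></pre>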
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">class Pdist(dict):
+    &#34;&#34;&#34;A probability distribution estimated from counts in datafile.
+    Values are stored and returned as log probabilities.
+    &#34;&#34;&#34;
+    def __init__(self, data=[], estimate_of_missing=None):
+        data1, data2 = itertools.tee(data)
+        self.total = sum([d[1] for d in data1])
+        for key, count in data2:
+            self[key] = log10(count / self.total)
+        self.estimate_of_missing = estimate_of_missing or (lambda k, N: 1./N)
+    def __missing__(self, key):
+        return self.estimate_of_missing(key, self.total)</code></pre>
+</details>
+<h3>Ancestors</h3>
+<ul class="hlist">
+<li>builtins.dict</li>
+</ul>
+</dd>
+</dl>
+</section>
+</article>
+<nav id="sidebar">
+<h1>Index</h1>
+<div class="toc">
+<ul></ul>
+</div>
+<ul id="index">
+<li><h3>Super-module</h3>
+<ul>
+<li><code><a title="szyfrow.support" href="index.html">szyfrow.support</a></code></li>
+</ul>
+</li>
+<li><h3><a href="#header-variables">Global variables</a></h3>
+<ul class="">
+<li><code><a title="szyfrow.support.language_models.P2l" href="#szyfrow.support.language_models.P2l">P2l</a></code></li>
+<li><code><a title="szyfrow.support.language_models.P3l" href="#szyfrow.support.language_models.P3l">P3l</a></code></li>
+<li><code><a title="szyfrow.support.language_models.Pl" href="#szyfrow.support.language_models.Pl">Pl</a></code></li>
+<li><code><a title="szyfrow.support.language_models.Pw" href="#szyfrow.support.language_models.Pw">Pw</a></code></li>
+<li><code><a title="szyfrow.support.language_models.english_bigram_counts" href="#szyfrow.support.language_models.english_bigram_counts">english_bigram_counts</a></code></li>
+<li><code><a title="szyfrow.support.language_models.english_counts" href="#szyfrow.support.language_models.english_counts">english_counts</a></code></li>
+<li><code><a title="szyfrow.support.language_models.english_trigram_counts" href="#szyfrow.support.language_models.english_trigram_counts">english_trigram_counts</a></code></li>
+<li><code><a title="szyfrow.support.language_models.keywords" href="#szyfrow.support.language_models.keywords">keywords</a></code></li>
+<li><code><a title="szyfrow.support.language_models.normalised_english_bigram_counts" href="#szyfrow.support.language_models.normalised_english_bigram_counts">normalised_english_bigram_counts</a></code></li>
+<li><code><a title="szyfrow.support.language_models.normalised_english_counts" href="#szyfrow.support.language_models.normalised_english_counts">normalised_english_counts</a></code></li>
+<li><code><a title="szyfrow.support.language_models.normalised_english_trigram_counts" href="#szyfrow.support.language_models.normalised_english_trigram_counts">normalised_english_trigram_counts</a></code></li>
+<li><code><a title="szyfrow.support.language_models.transpositions" href="#szyfrow.support.language_models.transpositions">transpositions</a></code></li>
+</ul>
+</li>
+<li><h3><a href="#header-functions">Functions</a></h3>
+<ul class="">
+<li><code><a title="szyfrow.support.language_models.Pbigrams" href="#szyfrow.support.language_models.Pbigrams">Pbigrams</a></code></li>
+<li><code><a title="szyfrow.support.language_models.Pletters" href="#szyfrow.support.language_models.Pletters">Pletters</a></code></li>
+<li><code><a title="szyfrow.support.language_models.Ptrigrams" href="#szyfrow.support.language_models.Ptrigrams">Ptrigrams</a></code></li>
+<li><code><a title="szyfrow.support.language_models.Pwords" href="#szyfrow.support.language_models.Pwords">Pwords</a></code></li>
+<li><code><a title="szyfrow.support.language_models.cosine_distance_score" href="#szyfrow.support.language_models.cosine_distance_score">cosine_distance_score</a></code></li>
+<li><code><a title="szyfrow.support.language_models.datafile" href="#szyfrow.support.language_models.datafile">datafile</a></code></li>
+<li><code><a title="szyfrow.support.language_models.log_probability_of_unknown_word" href="#szyfrow.support.language_models.log_probability_of_unknown_word">log_probability_of_unknown_word</a></code></li>
+<li><code><a title="szyfrow.support.language_models.ngrams" href="#szyfrow.support.language_models.ngrams">ngrams</a></code></li>
+<li><code><a title="szyfrow.support.language_models.random_english_letter" href="#szyfrow.support.language_models.random_english_letter">random_english_letter</a></code></li>
+<li><code><a title="szyfrow.support.language_models.transpositions_of" href="#szyfrow.support.language_models.transpositions_of">transpositions_of</a></code></li>
+<li><code><a title="szyfrow.support.language_models.weighted_choice" href="#szyfrow.support.language_models.weighted_choice">weighted_choice</a></code></li>
+</ul>
+</li>
+<li><h3><a href="#header-classes">Classes</a></h3>
+<ul>
+<li>
+<h4><code><a title="szyfrow.support.language_models.Pdist" href="#szyfrow.support.language_models.Pdist">Pdist</a></code></h4>
+</li>
+</ul>
+</li>
+</ul>
+</nav>
+</main>
+<footer id="footer">
+<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.9.2</a>.</p>
+</footer>
+</body>
+</html>
\ No newline at end of file