diff --git a/language_models.py b/language_models.py
index 9297468..63aac6b 100644
--- a/language_models.py
+++ b/language_models.py
@@ -100,7 +100,7 @@ def ngrams(text, n):
     """
     return [text[i:i+n] for i in range(len(text)-n+1)]
 
-
+    
 class Pdist(dict):
     """A probability distribution estimated from counts in datafile.
     Values are stored and returned as log probabilities.
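A quick, illustrative check of the ngrams helper shown in this hunk (the sample call and the import path are assumptions, not part of the patch):

# ngrams slides a window of width n along the text, one position at a time.
from language_models import ngrams
print(ngrams('trigram', 3))   # ['tri', 'rig', 'igr', 'gra', 'ram']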
@@ -120,14 +120,22 @@ def log_probability_of_unknown_word(key, N):
     return -log10(N * 10**((len(key) - 2) * 1.4))
 
 Pw = Pdist(datafile('count_1w.txt'), log_probability_of_unknown_word)
+Pw_wrong = Pdist(datafile('count_1w.txt'), lambda _k, N: log10(1/N))
 Pl = Pdist(datafile('count_1l.txt'), lambda _k, _N: 0)
 P2l = Pdist(datafile('count_2l.txt'), lambda _k, _N: 0)
+P3l = Pdist(datafile('count_3l.txt'), lambda _k, _N: 0)
 
 def Pwords(words): 
     """The Naive Bayes log probability of a sequence of words.
     """
     return sum(Pw[w.lower()] for w in words)
 
+def Pwords_wrong(words): 
+    """The Naive Bayes log probability of a sequence of words.
+    """
+    return sum(Pw_wrong[w.lower()] for w in words)
+
+
 def Pletters(letters):
     """The Naive Bayes log probability of a sequence of letters.
     """
@@ -139,15 +147,21 @@ def Pbigrams(letters):
     """
     return sum(P2l[p] for p in ngrams(letters, 2))
 
+def Ptrigrams(letters):
+    """The Naive Bayes log probability of the trigrams formed from a sequence 
+    of letters.
+    """
+    return sum(P3l[p] for p in ngrams(letters, 3))
+
 
-def cosine_distance_score(text):
-    """Finds the dissimilarity of a text to English, using the cosine distance
-    of the frequency distribution.
+def cosine_similarity_score(text):
+    """Finds the similarity of a text to English, using the cosine similarity
+    of the frequency distribution.
 
-    >>> cosine_distance_score('abcabc') # doctest: +ELLIPSIS
-    0.370847405...
+    >>> cosine_similarity_score('abcabc') # doctest: +ELLIPSIS
+    0.26228882...
     """
-    return norms.cosine_distance(english_counts, 
+    return norms.cosine_similarity(english_counts, 
         collections.Counter(sanitise(text)))
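As a rough usage sketch (the sample strings and import path are assumptions, not part of the patch), the letter-level scorers and the new cosine score can be compared on candidate plaintexts:

# Illustrative only: assumes the module and its count_*.txt data files are
# importable as language_models; the strings are made up.
from language_models import Pletters, Pbigrams, Ptrigrams, cosine_similarity_score

candidate = 'attackatdawn'   # hypothetical decryption attempt
noise = 'qzjxkwvqzjxkw'      # hypothetical junk string

for scorer in (Pletters, Pbigrams, Ptrigrams, cosine_similarity_score):
    print(scorer.__name__, scorer(candidate), scorer(noise))
# The log-probability scorers return values of 0 or below; cosine_similarity_score
# returns a value in [0, 1]. In each case, larger values suggest more English-like text.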