Updated letter counts and the tests based on them.
author	Neil Smith <neil.git@njae.me.uk>
	Tue, 24 Oct 2017 09:13:49 +0000 (10:13 +0100)
committer	Neil Smith <neil.git@njae.me.uk>
	Tue, 24 Oct 2017 09:13:49 +0000 (10:13 +0100)
count_1l.txt
language_models.py

index c1fc8efb1514c6b67cd0d66b1da928916a12ba45..e9ac0c6594c432e6a478fa453c6579459d88f26a 100644 (file)
@@ -1,26 +1,26 @@
-e      756288
-t      559059
-o      503173
-a      489107
-i      420131
-n      418342
-h      415853
-s      403715
-r      372431
-d      267381
-l      258537
-u      189758
-m      171836
-w      153882
-y      142711
-c      140497
-f      134935
-g      117474
-p      100241
-b      92647
-v      65181
-k      54114
-x      7386
-j      6590
-q      5488
-z      3575
+e      758103
+t      560576
+o      504520
+a      490129
+i      421240
+n      419374
+h      416369
+s      404473
+r      373599
+d      267917
+l      259023
+u      190269
+m      172199
+w      154157
+y      143040
+c      141094
+f      135318
+g      117888
+p      100690
+b      92919
+v      65297
+k      54248
+x      7414
+j      6679
+q      5499
+z      3577
index 8824bca4597327623798382288c2bcffb9d8005b..0fa6e85dc7f3732e2c36a1c1bc4ead827005023e 100644 (file)
@@ -126,6 +126,7 @@ def log_probability_of_unknown_word(key, N):
 Pw = Pdist(datafile('count_1w.txt'), log_probability_of_unknown_word)
 Pl = Pdist(datafile('count_1l.txt'), lambda _k, _N: 0)
 P2l = Pdist(datafile('count_2l.txt'), lambda _k, _N: 0)
+P3l = Pdist(datafile('count_3l.txt'), lambda _k, _N: 0)
 
 def Pwords(words): 
     """The Naive Bayes log probability of a sequence of words.
@@ -143,15 +144,23 @@ def Pbigrams(letters):
     """
     return sum(P2l[p] for p in ngrams(letters, 2))
 
+def Ptrigrams(letters):
+    """The Naive Bayes log probability of the trigrams formed from a sequence
+    of letters.
+    """
+    return sum(P3l[p] for p in ngrams(letters, 3))
+
 
 def cosine_distance_score(text):
     """Finds the dissimilarity of a text to English, using the cosine distance
     of the frequency distribution.
 
     >>> cosine_distance_score('abcabc') # doctest: +ELLIPSIS
-    0.370847405...
+    0.73777...
     """
-    return norms.cosine_distance(english_counts, 
+    # return norms.cosine_distance(english_counts, 
+    #     collections.Counter(sanitise(text)))
+    return 1 - norms.cosine_similarity(english_counts, 
         collections.Counter(sanitise(text)))
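
The new P3l distribution and Ptrigrams scorer follow the same pattern as Pl/Pletters and P2l/Pbigrams: build a Pdist from a whitespace-separated n-gram count file and sum the log probabilities of a text's n-grams. Below is a minimal, self-contained sketch of that idea, not the repository's code: load_counts and trigram_log_probability are illustrative names, the unknown-trigram floor is an assumption, and the repository's Pdist/datafile machinery is bypassed entirely.

import collections
import math

def ngrams(text, n):
    """All overlapping n-character slices of text."""
    return [text[i:i + n] for i in range(len(text) - n + 1)]

def load_counts(path):
    """Read a whitespace-separated 'ngram count' file (like count_3l.txt) into a Counter."""
    counts = collections.Counter()
    with open(path) as f:
        for line in f:
            if not line.strip():
                continue
            key, count = line.split()
            counts[key] = int(count)
    return counts

def trigram_log_probability(letters, counts):
    """Sum of log10 trigram probabilities of the letters.
    Unseen trigrams get a small floor probability here (an assumption);
    the repository handles unknowns through Pdist's callback instead."""
    total = sum(counts.values())
    floor = math.log10(0.01 / total)
    return sum(math.log10(counts[t] / total) if t in counts else floor
               for t in ngrams(letters, 3))

Higher (less negative) totals mean the letter sequence looks more like English trigrams, which is what a cipher-breaking search would maximise.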
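
The cosine_distance_score change swaps norms.cosine_distance for 1 - norms.cosine_similarity, which is why the doctest expectation moves from 0.3708... to 0.7377.... The sketch below shows the underlying calculation on plain Counters; it is an illustration, not the norms module: english_counts here is any letter-frequency Counter (the repository builds its own from count_1l.txt), and the inline lower-casing and filtering only approximates its sanitise helper.

import collections
import math

def cosine_similarity(freqs1, freqs2):
    """Cosine of the angle between two sparse count vectors."""
    dot = sum(freqs1[k] * freqs2.get(k, 0) for k in freqs1)
    norm1 = math.sqrt(sum(v * v for v in freqs1.values()))
    norm2 = math.sqrt(sum(v * v for v in freqs2.values()))
    return dot / (norm1 * norm2)

def cosine_distance_score(text, english_counts):
    """Dissimilarity of text to English letter frequencies:
    0 when the distributions point the same way, approaching 1 as they diverge."""
    text_counts = collections.Counter(c.lower() for c in text if c.isalpha())
    return 1 - cosine_similarity(english_counts, text_counts)

Lower scores mean a closer match to English, so a search over candidate decryptions would minimise this score.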