+ def __init__(self, data=[], estimate_of_missing=None):
+ data1, data2 = itertools.tee(data)
+ self.total = sum([d[1] for d in data1])
+ for key, count in data2:
+ self[key] = log10(count / self.total)
+ self.estimate_of_missing = estimate_of_missing or (lambda k, N: 1./N)
+ def __missing__(self, key):
+ return self.estimate_of_missing(key, self.total)
+
def log_probability_of_unknown_word(key, N):
    """Estimate the log10 probability of a word absent from the corpus.

    Longer unknown words are penalised: every letter beyond the second
    multiplies the denominator by a further factor of 10**1.4.
    """
    length_penalty = 10 ** ((len(key) - 2) * 1.4)
    return -log10(N * length_penalty)
+
# Language models built at import time from the count data files
# (presumably one "token count" record per line — confirm against
# datafile's format). Unknown words get the length-based estimate;
# unknown single letters and bigrams get log-probability 0 (i.e.
# probability 1), which effectively ignores them in scoring.
Pw = Pdist(datafile('count_1w.txt'), log_probability_of_unknown_word)
Pl = Pdist(datafile('count_1l.txt'), lambda _k, _N: 0)
P2l = Pdist(datafile('count_2l.txt'), lambda _k, _N: 0)
+
def Pwords(words):
    """The Naive Bayes log probability of a sequence of words.

    Words are lower-cased before lookup in the unigram word model.
    """
    total = 0
    for word in words:
        total += Pw[word.lower()]
    return total
+
def Pletters(letters):
    """The Naive Bayes log probability of a sequence of letters.

    Letters are lower-cased before lookup in the unigram letter model.
    """
    total = 0
    for letter in letters:
        total += Pl[letter.lower()]
    return total
+
def Pbigrams(letters):
    """The Naive Bayes log probability of the bigrams formed from a
    sequence of letters.

    NOTE(review): unlike Pwords/Pletters this does not lower-case its
    input — presumably callers sanitise the text first; confirm.
    """
    total = 0
    for bigram in ngrams(letters, 2):
        total += P2l[bigram]
    return total
+
+
def cosine_distance_score(text):
    """Finds the dissimilarity of a text to English, using the cosine distance
    of the frequency distribution.

    >>> cosine_distance_score('abcabc') # doctest: +ELLIPSIS
    0.370847405...
    """
    letter_counts = collections.Counter(sanitise(text))
    return norms.cosine_distance(english_counts, letter_counts)