X-Git-Url: https://git.njae.me.uk/?a=blobdiff_plain;f=language_models.py;h=babbea19ceed80bd9d95fee347c64e53256b5626;hb=32a4467e6f7ac8ff2e6738118242ec4e4c255e8a;hp=ceb4596eb2fd87d3d2375f338892f9652525f2d4;hpb=96d46a680a808555a9ff77f2eaa68383569f07ee;p=cipher-training.git

diff --git a/language_models.py b/language_models.py
index ceb4596..babbea1 100644
--- a/language_models.py
+++ b/language_models.py
@@ -1,6 +1,10 @@
+"""Language-specific functions, including models of languages based on data of
+its use.
+"""
+
 import string
-import norms
 import random
+import norms
 import collections
 import unicodedata
 import itertools
@@ -16,7 +20,7 @@ def letters(text):
     return ''.join([c for c in text if c in string.ascii_letters])
 
 def unaccent(text):
-    """Remove all accents from letters. 
+    """Remove all accents from letters.
 
     It does this by converting the unicode string to decomposed compatability
     form, dropping all the combining accents, then re-encoding the bytes.
@@ -37,7 +41,7 @@ def unaccent(text):
 
 def sanitise(text):
     """Remove all non-alphabetic characters and convert the text to lowercase
-    
+
     >>> sanitise('The Quick')
     'thequick'
     >>> sanitise('The Quick BROWN fox jumped! over... the (9lazy) DOG')
@@ -72,20 +76,20 @@ with open('words.txt', 'r') as f:
 
 
 def weighted_choice(d):
-	"""Generate random item from a dictionary of item counts
-	"""
-	target = random.uniform(0, sum(d.values()))
-	cuml = 0.0
-	for (l, p) in d.items():
-		cuml += p
-		if cuml > target:
-			return l
-	return None
+    """Generate random item from a dictionary of item counts
+    """
+    target = random.uniform(0, sum(d.values()))
+    cuml = 0.0
+    for (l, p) in d.items():
+        cuml += p
+        if cuml > target:
+            return l
+    return None
 
 def random_english_letter():
-	"""Generate a random letter based on English letter counts
-	"""
-	return weighted_choice(normalised_english_counts)
+    """Generate a random letter based on English letter counts
+    """
+    return weighted_choice(normalised_english_counts)
 
 
 def ngrams(text, n):
@@ -120,14 +124,22 @@ def log_probability_of_unknown_word(key, N):
     return -log10(N * 10**((len(key) - 2) * 1.4))
 
 Pw = Pdist(datafile('count_1w.txt'), log_probability_of_unknown_word)
+Pw_wrong = Pdist(datafile('count_1w.txt'), lambda _k, N: log10(1/N))
 Pl = Pdist(datafile('count_1l.txt'), lambda _k, _N: 0)
 P2l = Pdist(datafile('count_2l.txt'), lambda _k, _N: 0)
+P3l = Pdist(datafile('count_3l.txt'), lambda _k, _N: 0)
 
 def Pwords(words):
     """The Naive Bayes log probability of a sequence of words.
     """
     return sum(Pw[w.lower()] for w in words)
 
+def Pwords_wrong(words):
+    """The Naive Bayes log probability of a sequence of words.
+    """
+    return sum(Pw_wrong[w.lower()] for w in words)
+
+
 def Pletters(letters):
     """The Naive Bayes log probability of a sequence of letters.
     """
@@ -139,6 +151,12 @@ def Pbigrams(letters):
     """
     return sum(P2l[p] for p in ngrams(letters, 2))
 
+def Ptrigrams(letters):
+    """The Naive Bayes log probability of the trigrams formed from a sequence
+    of letters.
+    """
+    return sum(P3l[p] for p in ngrams(letters, 3))
+
 
 def cosine_similarity_score(text):
     """Finds the dissimilarity of a text to English, using the cosine distance
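
The weighted_choice hunk appears to be a whitespace-only change (the removed and added bodies differ only in indentation), but the function is the sampling core of random_english_letter: it walks the cumulative sum of d.values() and returns the first item whose cumulative weight passes a uniformly drawn target. A minimal sketch of exercising it through random_english_letter, assuming language_models.py and its data files are importable from the working directory; the sample size is arbitrary:

from collections import Counter
from language_models import random_english_letter

# Draw many letters and tally them; common English letters such as
# 'e' and 't' should dominate the counts.
sample = Counter(random_english_letter() for _ in range(10000))
print(sample.most_common(5))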
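The new Pw_wrong distribution differs from Pw only in its estimate for words missing from count_1w.txt: log_probability_of_unknown_word charges -log10(N * 10**((len(key) - 2) * 1.4)), a penalty that grows with word length, while the lambda behind Pw_wrong charges a flat log10(1/N) however long the unseen word is. A sketch of the contrast, assuming count_1w.txt is present; the runs of 'z' are invented strings chosen to be absent from the word list:

from language_models import Pwords, Pwords_wrong

# Pwords penalises the longer unknown word more heavily, while
# Pwords_wrong gives both phrases the same flat unknown-word penalty.
for phrase in [['the', 'quick', 'zzzzzz'], ['the', 'quick', 'zzzzzzzzzzzz']]:
    print(phrase, Pwords(phrase), Pwords_wrong(phrase))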
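Ptrigrams follows the existing Pbigrams pattern one order higher: it sums the log10 probabilities of each overlapping three-letter ngram, looked up in the new P3l distribution built from count_3l.txt. Totals are therefore negative, and the candidate nearer zero is the better fit to English. A usage sketch under the same assumption that the repository's data files are on hand; both candidate strings are invented:

from language_models import sanitise, Pbigrams, Ptrigrams

candidates = ['thequickbrownfox', 'xjwukvfgpzmqaahy']
for c in candidates:
    # Each scorer sums log10 probabilities over its ngrams, so the
    # English-like candidate should score nearer zero than the noise.
    print(c, Pbigrams(sanitise(c)), Ptrigrams(sanitise(c)))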