X-Git-Url: https://git.njae.me.uk/?a=blobdiff_plain;f=segment.py;h=dd0b2a8347ee800c4addf996f369ea0293b47bb7;hb=3906e8a686e3d1943e22746b65c394a4def34fc0;hp=f90af1d92c8fe0046b4f5c7a4975f53e7011e6ea;hpb=269665fe76e7aeb87165a87d3a1cbb755a5e3768;p=cipher-tools.git

diff --git a/segment.py b/segment.py
index f90af1d..dd0b2a8 100644
--- a/segment.py
+++ b/segment.py
@@ -1,54 +1,54 @@
-# import re, string, random, glob, operator, heapq
 import string
 import collections
 from math import log10
+import itertools
+import sys
+from functools import lru_cache
+sys.setrecursionlimit(1000000)
 
-def memo(f):
-    "Memoize function f."
-    table = {}
-    def fmemo(*args):
-        if args not in table:
-            table[args] = f(*args)
-        return table[args]
-    fmemo.memo = table
-    return fmemo
-
-@memo
+@lru_cache()
 def segment(text):
-    "Return a list of words that is the best segmentation of text."
+    """Return a list of words that is the best segmentation of text.
+    """
     if not text: return []
-    candidates = ([first]+segment(rem) for first,rem in splits(text))
+    candidates = ([first]+segment(rest) for first,rest in splits(text))
     return max(candidates, key=Pwords)
 
 def splits(text, L=20):
-    "Return a list of all possible (first, rem) pairs, len(first)<=L."
+    """Return a list of all possible (first, rest) pairs, len(first)<=L.
+    """
     return [(text[:i+1], text[i+1:])
             for i in range(min(len(text), L))]
 
 def Pwords(words):
-    "The Naive Bayes probability of a sequence of words."
-    return product(Pw(w) for w in words)
+    """The Naive Bayes log probability of a sequence of words.
+    """
+    return sum(Pw[w.lower()] for w in words)
 
 class Pdist(dict):
-    "A probability distribution estimated from counts in datafile."
-    def __init__(self, data=[], N=None, missingfn=None):
-        for key,count in data:
-            self[key] = self.get(key, 0) + int(count)
-        self.N = float(N or sum(self.itervalues()))
-        self.missingfn = missingfn or (lambda k, N: 1./N)
-    def __call__(self, key):
-        if key in self: return self[key]/self.N
-        else: return self.missingfn(key, self.N)
+    """A probability distribution estimated from counts in datafile.
+    Values are stored and returned as log probabilities.
+    """
+    def __init__(self, data=[], estimate_of_missing=None):
+        data1, data2 = itertools.tee(data)
+        self.total = sum([int(d[1]) for d in data1])
+        for key, count in data2:
+            self[key] = log10(int(count) / self.total)
+        self.estimate_of_missing = estimate_of_missing or (lambda k, N: 1./N)
+    def __missing__(self, key):
+        return self.estimate_of_missing(key, self.total)
 
 def datafile(name, sep='\t'):
-    "Read key,value pairs from file."
-    for line in file(name):
-        yield line.split(sep)
+    """Read key,value pairs from file.
+    """
+    with open(name, 'r') as f:
+        for line in f:
+            yield line.split(sep)
 
 def avoid_long_words(key, N):
-    "Estimate the probability of an unknown word."
-    return 10./(N * 10**len(key))
-
-N = 1024908267229 ## Number of tokens
+    """Estimate the probability of an unknown word.
+    """
+    return -log10((N * 10**(len(key) - 2)))
 
-Pw = Pdist(datafile('count_1w.txt'), N, avoid_long_words)
+Pw = Pdist(datafile('count_1w.txt'), avoid_long_words)
+
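
For context, a minimal usage sketch of the revised module follows. It is an illustration under assumptions, not code from the commit: it presumes the file is importable as segment, that the unigram count file count_1w.txt it loads at import time is present in the working directory, and the sample plaintext and the output shown in the comments are invented for the example.

# Usage sketch (hypothetical driver, not part of the commit).
import segment                         # loads count_1w.txt and builds Pw at import time

text = 'thisisasecretmessage'          # example input, assumed for illustration
words = segment.segment(text.lower())  # segment() is lru_cache'd, so pass a plain str
print(' '.join(words))                 # expected to print something like: this is a secret message

# Pwords() now returns a log10 probability (a sum of per-word logs),
# so scores are negative and values closer to zero are more probable.
print(segment.Pwords(words))

Switching Pwords from a product of raw probabilities to a sum of log10 probabilities (with Pdist storing log values) avoids floating-point underflow when scoring long candidate segmentations, which is also why the missing-word estimate is returned on the log scale.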