>>> caesar_break('ibxcsyorsaqcheyklxivoexlevmrimwxsfiqevvmihrsasrxliwyrh' \
'ecjsppsamrkwleppfmergefifvmhixscsymjcsyqeoixlm') # doctest: +ELLIPSIS
- (4, 0.31863952890183...)
+ (4, 0.080345432737...)
>>> caesar_break('wxwmaxdgheetgwuxztgptedbgznitgwwhpguxyhkxbmhvvtlbhgtee' \
'raxlmhiixweblmxgxwmhmaxybkbgztgwztsxwbgmxgmert') # doctest: +ELLIPSIS
- (19, 0.42152901235832...)
+ (19, 0.11189290326...)
>>> caesar_break('yltbbqnqnzvguvaxurorgenafsbezqvagbnornfgsbevpnaabjurer' \
'svaquvzyvxrnznazlybequrvfohgriraabjtbaruraprur') # doctest: +ELLIPSIS
- (13, 0.316029208075451...)
+ (13, 0.08293968842...)
"""
sanitised_message = sanitise(message)
best_shift = 0
'ls umfjsd jlsi zg hfsqysxog. ls dmmdtsd mx jls bats mh bkbsf. ls ' \
'bfmctsd kfmyxd jls lyj, mztanamyu xmc jm clm cku tmmeaxw kj lai kxd ' \
'clm ckuxj.') # doctest: +ELLIPSIS
- ((15, 22, True), 0.23570361818655...)
+ ((15, 22, True), 0.0598745365924...)
"""
sanitised_message = sanitise(message)
best_multiplier = 0
>>> keyword_break(keyword_encipher('this is a test message for the ' \
'keyword decipherment', 'elephant', 1), \
wordlist=['cat', 'elephant', 'kangaroo']) # doctest: +ELLIPSIS
- (('elephant', 1), 0.41643991598441...)
+ (('elephant', 1), 0.1066453448861...)
"""
best_keyword = ''
best_wrap_alphabet = True
>>> keyword_break_mp(keyword_encipher('this is a test message for the ' \
'keyword decipherment', 'elephant', 1), \
wordlist=['cat', 'elephant', 'kangaroo']) # doctest: +ELLIPSIS
- (('elephant', 1), 0.41643991598441...)
+ (('elephant', 1), 0.106645344886...)
"""
with Pool() as pool:
helper_args = [(message, word, wrap, metric, target_counts,
>>> scytale_break('tfeulchtrtteehwahsdehneoifeayfsondmwpltmaoalhikotoere' \
'dcweatehiplwxsnhooacgorrcrcraotohsgullasenylrendaianeplscdriioto' \
'aek') # doctest: +ELLIPSIS
- (6, 0.83453041115025...)
+ (6, 0.092599933059...)
"""
best_key = 0
best_fit = float("inf")
n-gram frequency analysis
>>> column_transposition_break(column_transposition_encipher(sanitise( \
- "Turing's homosexuality resulted in a criminal prosecution in 1952, \
- when homosexual acts were still illegal in the United Kingdom. "), \
+ "It is a truth universally acknowledged, that a single man in \
+ possession of a good fortune, must be in want of a wife. However \
+ little known the feelings or views of such a man may be on his \
+ first entering a neighbourhood, this truth is so well fixed in the \
+ minds of the surrounding families, that he is considered the \
+ rightful property of some one or other of their daughters."), \
'encipher'), \
translist={(2, 0, 5, 3, 1, 4, 6): ['encipher'], \
(5, 0, 6, 1, 3, 4, 2): ['fourteen'], \
(6, 1, 0, 4, 5, 3, 2): ['keyword']}) # doctest: +ELLIPSIS
- ((2, 0, 5, 3, 1, 4, 6), 0.898128626285...)
+ ((2, 0, 5, 3, 1, 4, 6), 0.0628106372...)
>>> column_transposition_break(column_transposition_encipher(sanitise( \
- "Turing's homosexuality resulted in a criminal prosecution in 1952, " \
- "when homosexual acts were still illegal in the United Kingdom."), \
+ "It is a truth universally acknowledged, that a single man in \
+ possession of a good fortune, must be in want of a wife. However \
+ little known the feelings or views of such a man may be on his \
+ first entering a neighbourhood, this truth is so well fixed in the \
+ minds of the surrounding families, that he is considered the \
+ rightful property of some one or other of their daughters."), \
'encipher'), \
translist={(2, 0, 5, 3, 1, 4, 6): ['encipher'], \
(5, 0, 6, 1, 3, 4, 2): ['fourteen'], \
(6, 1, 0, 4, 5, 3, 2): ['keyword']}, \
target_counts=normalised_english_trigram_counts) # doctest: +ELLIPSIS
- ((2, 0, 5, 3, 1, 4, 6), 1.1958792913127...)
+ ((2, 0, 5, 3, 1, 4, 6), 0.0592259560...)
"""
best_transposition = ''
best_fit = float("inf")
n-gram frequency analysis
>>> column_transposition_break_mp(column_transposition_encipher(sanitise( \
- "Turing's homosexuality resulted in a criminal prosecution in 1952, \
- when homosexual acts were still illegal in the United Kingdom. "), \
+ "It is a truth universally acknowledged, that a single man in \
+ possession of a good fortune, must be in want of a wife. However \
+ little known the feelings or views of such a man may be on his \
+ first entering a neighbourhood, this truth is so well fixed in the \
+ minds of the surrounding families, that he is considered the \
+ rightful property of some one or other of their daughters."), \
'encipher'), \
translist={(2, 0, 5, 3, 1, 4, 6): ['encipher'], \
(5, 0, 6, 1, 3, 4, 2): ['fourteen'], \
(6, 1, 0, 4, 5, 3, 2): ['keyword']}) # doctest: +ELLIPSIS
- ((2, 0, 5, 3, 1, 4, 6), 0.898128626285...)
+ ((2, 0, 5, 3, 1, 4, 6), 0.0628106372...)
>>> column_transposition_break_mp(column_transposition_encipher(sanitise( \
- "Turing's homosexuality resulted in a criminal prosecution in 1952, " \
- "when homosexual acts were still illegal in the United Kingdom."), \
+ "It is a truth universally acknowledged, that a single man in \
+ possession of a good fortune, must be in want of a wife. However \
+ little known the feelings or views of such a man may be on his \
+ first entering a neighbourhood, this truth is so well fixed in the \
+ minds of the surrounding families, that he is considered the \
+ rightful property of some one or other of their daughters."), \
'encipher'), \
translist={(2, 0, 5, 3, 1, 4, 6): ['encipher'], \
(5, 0, 6, 1, 3, 4, 2): ['fourteen'], \
(6, 1, 0, 4, 5, 3, 2): ['keyword']}, \
target_counts=normalised_english_trigram_counts) # doctest: +ELLIPSIS
- ((2, 0, 5, 3, 1, 4, 6), 1.1958792913127...)
+ ((2, 0, 5, 3, 1, 4, 6), 0.0592259560...)
"""
ngram_length = len(next(iter(target_counts.keys())))
with Pool() as pool:
>>> vigenere_keyword_break(vigenere_encipher(sanitise('this is a test ' \
'message for the vigenere decipherment'), 'cat'), \
wordlist=['cat', 'elephant', 'kangaroo']) # doctest: +ELLIPSIS
- ('cat', 0.4950195952826...)
+ ('cat', 0.15965224935...)
"""
best_keyword = ''
best_fit = float("inf")
>>> vigenere_keyword_break_mp(vigenere_encipher(sanitise('this is a test ' \
'message for the vigenere decipherment'), 'cat'), \
wordlist=['cat', 'elephant', 'kangaroo']) # doctest: +ELLIPSIS
- ('cat', 0.4950195952826...)
+ ('cat', 0.159652249358...)
"""
with Pool() as pool:
helper_args = [(message, word, metric, target_counts,
return keyword, fit
+def vigenere_ic_break(message, target_counts=normalised_english_counts):
+ key_length, _ = vigenere_key_length(message)
+ key = vigenere_find_key(message, key_length)
+ return key
+
+def vigenere_key_length(message):
+ """Estimates the length of the Vigenere keyword used to encipher message,
+ by picking the number of columns that maximises the columns' average
+ index of coincidence. Returns the best length and its IC.
+ """
+ best_length = 0
+ best_ic = 0.0
+ for trial_length in range(1, 20):
+ splits = every_nth(message, trial_length)
+ # normalise each column's counts to probabilities so that the sum of
+ # squares below is that column's index of coincidence
+ freqs = [norms.normalise(frequencies(s)) for s in splits]
+ ic = sum([sum([f ** 2 for f in fs.values()]) for fs in freqs]) / trial_length
+ logger.debug('Vigenere key length of {0} gives IC of {1}'.
+ format(trial_length, ic))
+ if ic > best_ic:
+ best_length = trial_length
+ best_ic = ic
+ return best_length, best_ic
+
+def vigenere_find_key(message, key_length):
+ """Recovers a Vigenere key of the given length by Caesar-breaking each
+ column of the message independently."""
+ splits = every_nth(message, key_length)
+ return ''.join([chr(caesar_break(s)[0] + ord('a')) for s in splits])
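+
+# A minimal round-trip sketch of the attack above. vigenere_ic_break_demo is a
+# hypothetical helper (not used elsewhere in this module); it only assumes
+# sanitise and vigenere_encipher, which the doctests above already rely on.
+def vigenere_ic_break_demo(plaintext, keyword):
+ """Enciphers plaintext with keyword, then recovers the key length and key
+ from the ciphertext alone, using vigenere_key_length and vigenere_find_key."""
+ ciphertext = vigenere_encipher(sanitise(plaintext), keyword)
+ recovered_length, _ = vigenere_key_length(ciphertext)
+ recovered_key = vigenere_find_key(ciphertext, recovered_length)
+ return recovered_length, recovered_key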
+
+
if __name__ == "__main__":
import doctest
doctest.testmod()
import collections
def normalise(frequencies):
- """Scale a set of frequenies so they have a unit euclidean length
+ """Scale a set of frequencies so they sum to one
>>> sorted(normalise({1: 1, 2: 0}).items())
[(1, 1.0), (2, 0.0)]
>>> sorted(normalise({1: 1, 2: 1}).items())
- [(1, 0.7071067811865475), (2, 0.7071067811865475)]
- >>> sorted(normalise({1: 1, 2: 1, 3: 1}).items())
- [(1, 0.5773502691896258), (2, 0.5773502691896258), (3, 0.5773502691896258)]
+ [(1, 0.5), (2, 0.5)]
+ >>> sorted(normalise({1: 1, 2: 1, 3: 1}).items()) # doctest: +ELLIPSIS
+ [(1, 0.333...), (2, 0.333...), (3, 0.333...)]
>>> sorted(normalise({1: 1, 2: 2, 3: 1}).items())
- [(1, 0.4082482904638631), (2, 0.8164965809277261), (3, 0.4082482904638631)]
- """
+ [(1, 0.25), (2, 0.5), (3, 0.25)]
+ """
+ length = sum([f for f in frequencies.values()])
+ return collections.defaultdict(int, ((k, v / length)
+ for (k, v) in frequencies.items()))
+
+def euclidean_scale(frequencies):
+ """Scale a set of frequencies so they have a unit euclidean length
+
+ >>> sorted(euclidean_scale({1: 1, 2: 0}).items())
+ [(1, 1.0), (2, 0.0)]
+ >>> sorted(euclidean_scale({1: 1, 2: 1}).items()) # doctest: +ELLIPSIS
+ [(1, 0.7071067...), (2, 0.7071067...)]
+ >>> sorted(euclidean_scale({1: 1, 2: 1, 3: 1}).items()) # doctest: +ELLIPSIS
+ [(1, 0.577350...), (2, 0.577350...), (3, 0.577350...)]
+ >>> sorted(euclidean_scale({1: 1, 2: 2, 3: 1}).items()) # doctest: +ELLIPSIS
+ [(1, 0.408248...), (2, 0.81649658...), (3, 0.408248...)]
+ """
length = sum([f ** 2 for f in frequencies.values()]) ** 0.5
return collections.defaultdict(int, ((k, v / length)
for (k, v) in frequencies.items()))
+
def scale(frequencies):
"""Scale a set of frequencies so the largest is 1
>>> l2({'a':1, 'b':1, 'c':1}, {'a':1, 'b':1, 'c':1})
0.0
- >>> l2({'a':2, 'b':2, 'c':2}, {'a':1, 'b':1, 'c':1})
- 1.7320508075688772
+ >>> l2({'a':2, 'b':2, 'c':2}, {'a':1, 'b':1, 'c':1}) # doctest: +ELLIPSIS
+ 1.73205080...
>>> l2(normalise({'a':2, 'b':2, 'c':2}), normalise({'a':1, 'b':1, 'c':1}))
0.0
- >>> l2({'a':0, 'b':2, 'c':0}, {'a':1, 'b':1, 'c':1})
- 1.7320508075688772
- >>> l2(normalise({'a':0, 'b':2, 'c':0}), normalise({'a':1, 'b':1, 'c':1}))
- 0.9194016867619662
+ >>> l2({'a':0, 'b':2, 'c':0}, {'a':1, 'b':1, 'c':1}) # doctest: +ELLIPSIS
+ 1.732050807...
+ >>> l2(normalise({'a':0, 'b':2, 'c':0}), \
+ normalise({'a':1, 'b':1, 'c':1})) # doctest: +ELLIPSIS
+ 0.81649658...
>>> l2({'a':0, 'b':1}, {'a':1, 'b':1})
1.0
"""
euclidean_distance = l2
def l1(frequencies1, frequencies2):
- """Finds the distances between two frequency profiles, expressed as dictionaries.
- Assumes every key in frequencies1 is also in frequencies2
+ """Finds the distances between two frequency profiles, expressed as
+ dictionaries. Assumes every key in frequencies1 is also in frequencies2
>>> l1({'a':1, 'b':1, 'c':1}, {'a':1, 'b':1, 'c':1})
0
return total
def l3(frequencies1, frequencies2):
- """Finds the distances between two frequency profiles, expressed as dictionaries.
- Assumes every key in frequencies1 is also in frequencies2
+ """Finds the distances between two frequency profiles, expressed as
+ dictionaries. Assumes every key in frequencies1 is also in frequencies2
>>> l3({'a':1, 'b':1, 'c':1}, {'a':1, 'b':1, 'c':1})
0.0
- >>> l3({'a':2, 'b':2, 'c':2}, {'a':1, 'b':1, 'c':1})
- 1.4422495703074083
- >>> l3({'a':0, 'b':2, 'c':0}, {'a':1, 'b':1, 'c':1})
- 1.4422495703074083
- >>> l3(normalise({'a':0, 'b':2, 'c':0}), normalise({'a':1, 'b':1, 'c':1}))
- 0.7721675487598008
+ >>> l3({'a':2, 'b':2, 'c':2}, {'a':1, 'b':1, 'c':1}) # doctest: +ELLIPSIS
+ 1.44224957...
+ >>> l3({'a':0, 'b':2, 'c':0}, {'a':1, 'b':1, 'c':1}) # doctest: +ELLIPSIS
+ 1.4422495703...
+ >>> l3(normalise({'a':0, 'b':2, 'c':0}), \
+ normalise({'a':1, 'b':1, 'c':1})) # doctest: +ELLIPSIS
+ 0.718144896...
>>> l3({'a':0, 'b':1}, {'a':1, 'b':1})
1.0
- >>> l3(normalise({'a':0, 'b':1}), normalise({'a':1, 'b':1}))
- 0.7234757712960591
+ >>> l3(normalise({'a':0, 'b':1}), normalise({'a':1, 'b':1})) # doctest: +ELLIPSIS
+ 0.6299605249...
"""
total = 0
for k in frequencies1.keys():
1
>>> geometric_mean({'a':2, 'b':2, 'c':2}, {'a':1, 'b':5, 'c':1})
3
- >>> geometric_mean(normalise({'a':2, 'b':2, 'c':2}), normalise({'a':1, 'b':5, 'c':1}))
- 0.057022248808851934
- >>> geometric_mean(normalise({'a':2, 'b':2, 'c':2}), normalise({'a':1, 'b':1, 'c':1}))
+ >>> geometric_mean(normalise({'a':2, 'b':2, 'c':2}), \
+ normalise({'a':1, 'b':5, 'c':1})) # doctest: +ELLIPSIS
+ 0.01382140...
+ >>> geometric_mean(normalise({'a':2, 'b':2, 'c':2}), \
+ normalise({'a':1, 'b':1, 'c':1})) # doctest: +ELLIPSIS
0.0
- >>> geometric_mean(normalise({'a':2, 'b':2, 'c':2}), normalise({'a':1, 'b':1, 'c':0}))
- 0.009720703533656434
+ >>> geometric_mean(normalise({'a':2, 'b':2, 'c':2}), \
+ normalise({'a':1, 'b':1, 'c':0})) # doctest: +ELLIPSIS
+ 0.009259259...
"""
total = 1
for k in frequencies1.keys():
1.0
>>> harmonic_mean({'a':2, 'b':2, 'c':2}, {'a':1, 'b':1, 'c':1})
1.0
- >>> harmonic_mean({'a':2, 'b':2, 'c':2}, {'a':1, 'b':5, 'c':1})
- 1.2857142857142858
- >>> harmonic_mean(normalise({'a':2, 'b':2, 'c':2}), normalise({'a':1, 'b':5, 'c':1}))
- 0.3849001794597505
- >>> harmonic_mean(normalise({'a':2, 'b':2, 'c':2}), normalise({'a':1, 'b':1, 'c':1}))
+ >>> harmonic_mean({'a':2, 'b':2, 'c':2}, {'a':1, 'b':5, 'c':1}) # doctest: +ELLIPSIS
+ 1.285714285...
+ >>> harmonic_mean(normalise({'a':2, 'b':2, 'c':2}), \
+ normalise({'a':1, 'b':5, 'c':1})) # doctest: +ELLIPSIS
+ 0.228571428571...
+ >>> harmonic_mean(normalise({'a':2, 'b':2, 'c':2}), \
+ normalise({'a':1, 'b':1, 'c':1})) # doctest: +ELLIPSIS
0
- >>> harmonic_mean(normalise({'a':2, 'b':2, 'c':2}), normalise({'a':1, 'b':1, 'c':0}))
- 0.17497266360581604
+ >>> harmonic_mean(normalise({'a':2, 'b':2, 'c':2}), \
+ normalise({'a':1, 'b':1, 'c':0})) # doctest: +ELLIPSIS
+ 0.2
"""
total = 0
for k in frequencies1.keys():
"""Finds the distances between two frequency profiles, expressed as dictionaries.
Assumes every key in frequencies1 is also in frequencies2
- >>> cosine_distance({'a':1, 'b':1, 'c':1}, {'a':1, 'b':1, 'c':1})
- -2.220446049250313e-16
- >>> cosine_distance({'a':2, 'b':2, 'c':2}, {'a':1, 'b':1, 'c':1})
- -2.220446049250313e-16
- >>> cosine_distance({'a':0, 'b':2, 'c':0}, {'a':1, 'b':1, 'c':1})
- 0.42264973081037416
- >>> cosine_distance({'a':0, 'b':1}, {'a':1, 'b':1})
- 0.29289321881345254
+ >>> cosine_distance({'a':1, 'b':1, 'c':1}, {'a':1, 'b':1, 'c':1}) # doctest: +ELLIPSIS
+ -2.22044604...e-16
+ >>> cosine_distance({'a':2, 'b':2, 'c':2}, {'a':1, 'b':1, 'c':1}) # doctest: +ELLIPSIS
+ -2.22044604...e-16
+ >>> cosine_distance({'a':0, 'b':2, 'c':0}, {'a':1, 'b':1, 'c':1}) # doctest: +ELLIPSIS
+ 0.4226497308...
+ >>> cosine_distance({'a':0, 'b':1}, {'a':1, 'b':1}) # doctest: +ELLIPSIS
+ 0.29289321881...
"""
numerator = 0
length1 = 0
return 1 - (numerator / (length1 ** 0.5 * length2 ** 0.5))
+def index_of_coincidence(frequencies):
+ """Finds the expected index of coincidence for a set of frequencies,
+ assumed to be normalised to sum to one; the result is scaled by the
+ number of distinct symbols present.
+ """
+ return sum([f ** 2 for f in frequencies.values()]) * len(frequencies.keys())
+
+
if __name__ == "__main__":
import doctest
doctest.testmod()