'abcdefghijklmnopqrstuvwxyz'
"""
return ''.join([''.join(l)
- for l in zip_longest(*split_text, fillvalue='')])
+ for l in zip_longest(*split_text, fillvalue='')])
def chunks(text, n, fillvalue=None):
"""Split a text into chunks of n characters
def untranspose(items, transposition):
"""Undoes a transpose
-
+
>>> untranspose(['a', 'b', 'c', 'd'], [0,1,2,3])
['a', 'b', 'c', 'd']
>>> untranspose(['d', 'b', 'c', 'a'], [3,1,2,0])
return caesar_encipher(message, -shift)
def affine_encipher_letter(accented_letter, multiplier=1, adder=0,
- one_based=True):
+ one_based=True):
"""Encipher a letter, given a multiplier and adder
>>> ''.join([affine_encipher_letter(l, 3, 5, True) \
for l in string.ascii_uppercase])
if one_based: cipher_number += 1
plaintext_number = (
modular_division_table[multiplier]
- [(cipher_number - adder) % 26] )
+ [(cipher_number - adder) % 26]
+ )
if one_based: plaintext_number -= 1
return chr(plaintext_number % 26 + alphabet_start)
else:
return ''.join(enciphered)
-class Keyword_wrap_alphabet(Enum):
+class KeywordWrapAlphabet(Enum):
"""Ways of wrapping the alphabet for keyword-based substitution ciphers."""
from_a = 1
from_last = 2
def keyword_cipher_alphabet_of(keyword,
- wrap_alphabet=Keyword_wrap_alphabet.from_a):
+ wrap_alphabet=KeywordWrapAlphabet.from_a):
"""Find the cipher alphabet given a keyword.
wrap_alphabet controls how the rest of the alphabet is added
after the keyword.
>>> keyword_cipher_alphabet_of('bayes')
'bayescdfghijklmnopqrtuvwxz'
- >>> keyword_cipher_alphabet_of('bayes', Keyword_wrap_alphabet.from_a)
+ >>> keyword_cipher_alphabet_of('bayes', KeywordWrapAlphabet.from_a)
'bayescdfghijklmnopqrtuvwxz'
- >>> keyword_cipher_alphabet_of('bayes', Keyword_wrap_alphabet.from_last)
+ >>> keyword_cipher_alphabet_of('bayes', KeywordWrapAlphabet.from_last)
'bayestuvwxzcdfghijklmnopqr'
- >>> keyword_cipher_alphabet_of('bayes', Keyword_wrap_alphabet.from_largest)
+ >>> keyword_cipher_alphabet_of('bayes', KeywordWrapAlphabet.from_largest)
'bayeszcdfghijklmnopqrtuvwx'
"""
- if wrap_alphabet == Keyword_wrap_alphabet.from_a:
+ if wrap_alphabet == KeywordWrapAlphabet.from_a:
cipher_alphabet = ''.join(deduplicate(sanitise(keyword) +
string.ascii_lowercase))
else:
- if wrap_alphabet == Keyword_wrap_alphabet.from_last:
+ if wrap_alphabet == KeywordWrapAlphabet.from_last:
last_keyword_letter = deduplicate(sanitise(keyword))[-1]
else:
last_keyword_letter = sorted(sanitise(keyword))[-1]
def keyword_encipher(message, keyword,
- wrap_alphabet=Keyword_wrap_alphabet.from_a):
+ wrap_alphabet=KeywordWrapAlphabet.from_a):
"""Enciphers a message with a keyword substitution cipher.
wrap_alphabet controls how the rest of the alphabet is added
after the keyword.
>>> keyword_encipher('test message', 'bayes')
'rsqr ksqqbds'
- >>> keyword_encipher('test message', 'bayes', Keyword_wrap_alphabet.from_a)
+ >>> keyword_encipher('test message', 'bayes', KeywordWrapAlphabet.from_a)
'rsqr ksqqbds'
- >>> keyword_encipher('test message', 'bayes', Keyword_wrap_alphabet.from_last)
+ >>> keyword_encipher('test message', 'bayes', KeywordWrapAlphabet.from_last)
'lskl dskkbus'
- >>> keyword_encipher('test message', 'bayes', Keyword_wrap_alphabet.from_largest)
+ >>> keyword_encipher('test message', 'bayes', KeywordWrapAlphabet.from_largest)
'qspq jsppbcs'
"""
cipher_alphabet = keyword_cipher_alphabet_of(keyword, wrap_alphabet)
cipher_translation = ''.maketrans(string.ascii_lowercase, cipher_alphabet)
return unaccent(message).lower().translate(cipher_translation)
-def keyword_decipher(message, keyword, wrap_alphabet=Keyword_wrap_alphabet.from_a):
+def keyword_decipher(message, keyword,
+ wrap_alphabet=KeywordWrapAlphabet.from_a):
"""Deciphers a message with a keyword substitution cipher.
wrap_alphabet controls how the rest of the alphabet is added
after the keyword.
>>> keyword_decipher('rsqr ksqqbds', 'bayes')
'test message'
- >>> keyword_decipher('rsqr ksqqbds', 'bayes', Keyword_wrap_alphabet.from_a)
+ >>> keyword_decipher('rsqr ksqqbds', 'bayes', KeywordWrapAlphabet.from_a)
'test message'
- >>> keyword_decipher('lskl dskkbus', 'bayes', Keyword_wrap_alphabet.from_last)
+ >>> keyword_decipher('lskl dskkbus', 'bayes', KeywordWrapAlphabet.from_last)
'test message'
- >>> keyword_decipher('qspq jsppbcs', 'bayes', Keyword_wrap_alphabet.from_largest)
+ >>> keyword_decipher('qspq jsppbcs', 'bayes', KeywordWrapAlphabet.from_largest)
'test message'
"""
cipher_alphabet = keyword_cipher_alphabet_of(keyword, wrap_alphabet)
pairs = zip(message, cycle(shifts))
return ''.join([caesar_decipher_letter(l, k) for l, k in pairs])
-beaufort_encipher=vigenere_decipher
-beaufort_decipher=vigenere_encipher
+beaufort_encipher = vigenere_decipher
+beaufort_decipher = vigenere_encipher
def transpositions_of(keyword):
    """Finds the transpositions given by a keyword. For instance, the keyword
'clever' rearranges to 'celrv', so the first column (0) stays first, the
- second column (1) moves to third, the third column (2) moves to second,
+ second column (1) moves to third, the third column (2) moves to second,
and so on.
If passed a tuple, assume it's already a transposition and just return it.
return padding
def column_transposition_encipher(message, keyword, fillvalue=' ',
- fillcolumnwise=False,
- emptycolumnwise=False):
+ fillcolumnwise=False,
+ emptycolumnwise=False):
"""Enciphers using the column transposition cipher.
Message is padded to allow all rows to be the same length.
return ''.join(chain(*transposed))
def column_transposition_decipher(message, keyword, fillvalue=' ',
- fillcolumnwise=False,
- emptycolumnwise=False):
+ fillcolumnwise=False,
+ emptycolumnwise=False):
"""Deciphers using the column transposition cipher.
Message is padded to allow all rows to be the same length.
"""
transpositions = [i for i in range(rows)]
return column_transposition_encipher(message, transpositions,
- fillvalue=fillvalue, fillcolumnwise=True, emptycolumnwise=False)
+ fillvalue=fillvalue, fillcolumnwise=True, emptycolumnwise=False)
def scytale_decipher(message, rows):
"""Deciphers using the scytale transposition cipher.
Assumes the message is padded so that all rows are the same length.
-
+
>>> scytale_decipher('tcnhkfeboqrxuo iw ', 3)
'thequickbrownfox '
>>> scytale_decipher('tubnhirfecooqkwx', 4)
ValueError: Wheel specification does not contain 26 letters
"""
if len(wheel_spec) != 13:
- raise ValueError("Wheel specification has {} pairs, requires 13".
- format(len(wheel_spec)))
+ raise ValueError("Wheel specification has {} pairs, requires"
+ " 13".format(len(wheel_spec)))
for p in wheel_spec:
if len(p) != 2:
raise ValueError("Not all mappings in wheel specification"
- "have two elements")
- if len(set([p[0] for p in wheel_spec] +
- [p[1] for p in wheel_spec])) != 26:
+ "have two elements")
+ if len(set([p[0] for p in wheel_spec] +
+ [p[1] for p in wheel_spec])) != 26:
raise ValueError("Wheel specification does not contain 26 letters")
def encipher_letter(self, letter):
+"""A set of functions to break the ciphers give in ciphers.py.
+"""
+
import string
import collections
import norms
import logging
import random
-from itertools import zip_longest, cycle, permutations, starmap
+import math
+from itertools import starmap
from segment import segment
from multiprocessing import Pool
-from math import log10
import matplotlib.pyplot as plt
def frequencies(text):
"""Count the number of occurrences of each character in text
-
+
>>> sorted(frequencies('abcdefabc').items())
[('a', 2), ('b', 2), ('c', 2), ('d', 1), ('e', 1), ('f', 1)]
>>> sorted(frequencies('the quick brown fox jumped over the lazy ' \
('W', 1), ('a', 1), ('c', 1), ('d', 1), ('e', 4), ('f', 1), ('h', 2),
('i', 1), ('j', 1), ('k', 1), ('l', 1), ('m', 1), ('o', 2), ('p', 1),
('r', 1), ('t', 1), ('u', 2), ('v', 1), ('x', 1), ('y', 1), ('z', 1)]
- >>> sorted(frequencies(sanitise('The Quick BROWN fox jumped! over... '\
+ >>> sorted(frequencies(sanitise('The Quick BROWN fox jumped! over... '\
'the (9lazy) DOG')).items()) # doctest: +NORMALIZE_WHITESPACE
[('a', 1), ('b', 1), ('c', 1), ('d', 2), ('e', 4), ('f', 1), ('g', 1),
('h', 2), ('i', 1), ('j', 1), ('k', 1), ('l', 1), ('m', 1), ('n', 1),
plaintext = caesar_decipher(sanitised_message, shift)
fit = fitness(plaintext)
logger.debug('Caesar break attempt using key {0} gives fit of {1} '
- 'and decrypt starting: {2}'.format(shift, fit,
- plaintext[:50]))
+ 'and decrypt starting: {2}'.format(shift, fit,
+ plaintext[:50]))
if fit > best_fit:
best_fit = fit
best_shift = shift
for one_based in [True, False]:
for multiplier in [x for x in range(1, 26, 2) if x != 13]:
for adder in range(26):
- plaintext = affine_decipher(sanitised_message,
+ plaintext = affine_decipher(sanitised_message,
multiplier, adder, one_based)
fit = fitness(plaintext)
logger.debug('Affine break attempt using key {0}x+{1} ({2}) '
'gives fit of {3} and decrypt starting: {4}'.
- format(multiplier, adder, one_based, fit,
+ format(multiplier, adder, one_based, fit,
plaintext[:50]))
if fit > best_fit:
best_fit = fit
'{3} and decrypt starting: {4}'.format(
best_multiplier, best_adder, best_one_based, best_fit,
affine_decipher(sanitised_message, best_multiplier,
- best_adder, best_one_based)[:50]))
+ best_adder, best_one_based)[:50]))
return (best_multiplier, best_adder, best_one_based), best_fit
def keyword_break(message, wordlist=keywords, fitness=Pletters):
- """Breaks a keyword substitution cipher using a dictionary and
- frequency analysis
+ """Breaks a keyword substitution cipher using a dictionary and
+ frequency analysis.
>>> keyword_break(keyword_encipher('this is a test message for the ' \
- 'keyword decipherment', 'elephant', Keyword_wrap_alphabet.from_last), \
+ 'keyword decipherment', 'elephant', KeywordWrapAlphabet.from_last), \
wordlist=['cat', 'elephant', 'kangaroo']) # doctest: +ELLIPSIS
- (('elephant', <Keyword_wrap_alphabet.from_last: 2>), -52.834575011...)
+ (('elephant', <KeywordWrapAlphabet.from_last: 2>), -52.834575011...)
"""
best_keyword = ''
best_wrap_alphabet = True
best_fit = float("-inf")
- for wrap_alphabet in Keyword_wrap_alphabet:
+ for wrap_alphabet in KeywordWrapAlphabet:
for keyword in wordlist:
plaintext = keyword_decipher(message, keyword, wrap_alphabet)
fit = fitness(plaintext)
best_wrap_alphabet))[:50]))
return (best_keyword, best_wrap_alphabet), best_fit
-def keyword_break_mp(message, wordlist=keywords, fitness=Pletters, chunksize=500):
- """Breaks a keyword substitution cipher using a dictionary and
+def keyword_break_mp(message, wordlist=keywords, fitness=Pletters,
+ chunksize=500):
+ """Breaks a keyword substitution cipher using a dictionary and
frequency analysis
>>> keyword_break_mp(keyword_encipher('this is a test message for the ' \
- 'keyword decipherment', 'elephant', Keyword_wrap_alphabet.from_last), \
+ 'keyword decipherment', 'elephant', KeywordWrapAlphabet.from_last), \
wordlist=['cat', 'elephant', 'kangaroo']) # doctest: +ELLIPSIS
- (('elephant', <Keyword_wrap_alphabet.from_last: 2>), -52.834575011...)
+ (('elephant', <KeywordWrapAlphabet.from_last: 2>), -52.834575011...)
"""
with Pool() as pool:
helper_args = [(message, word, wrap, fitness)
for word in wordlist
- for wrap in Keyword_wrap_alphabet]
+ for wrap in KeywordWrapAlphabet]
# Gotcha: the helper function here needs to be defined at the top level
# (limitation of Pool.starmap)
breaks = pool.starmap(keyword_break_worker, helper_args, chunksize)
wrap_alphabet, fit, sanitise(plaintext)[:50]))
return (keyword, wrap_alphabet), fit
-def monoalphabetic_break_hillclimbing(message, max_iterations = 10000000,
+def monoalphabetic_break_hillclimbing(message, max_iterations=10000000,
fitness=Pletters):
ciphertext = unaccent(message).lower()
alphabet = list(string.ascii_lowercase)
random.shuffle(alphabet)
alphabet = ''.join(alphabet)
return monoalphabetic_break_hillclimbing_worker(ciphertext, alphabet,
- max_iterations, fitness)
+ max_iterations, fitness)
def monoalphabetic_break_hillclimbing_mp(message, workers=10,
max_iterations = 10000000, fitness=Pletters, chunksize=1):
worker_args.append((ciphertext, alphabet, max_iterations, fitness))
with Pool() as pool:
breaks = pool.starmap(monoalphabetic_break_hillclimbing_worker,
- worker_args, chunksize)
+ worker_args, chunksize)
return max(breaks, key=lambda k: k[1])
-def monoalphabetic_break_hillclimbing_worker(message, alphabet,
+def monoalphabetic_break_hillclimbing_worker(message, alphabet,
max_iterations, fitness):
def swap(letters, i, j):
if i > j:
if i == j:
return letters
else:
- return letters[:i] + letters[j] + letters[i+1:j] +
- letters[i] + letters[j+1:]
+ return (letters[:i] + letters[j] + letters[i+1:j] + letters[i] +
+ letters[j+1:])
best_alphabet = alphabet
best_fitness = float('-inf')
for i in range(max_iterations):
return best_alphabet, best_fitness
-def column_transposition_break_mp(message, translist=transpositions,
- fitness=Pbigrams, chunksize=500):
- """Breaks a column transposition cipher using a dictionary and
+def column_transposition_break_mp(message, translist=transpositions,
+ fitness=Pbigrams, chunksize=500):
+ """Breaks a column transposition cipher using a dictionary and
n-gram frequency analysis
>>> column_transposition_break_mp(column_transposition_encipher(sanitise( \
"It is a truth universally acknowledged, that a single man in \
possession of a good fortune, must be in want of a wife. However \
little known the feelings or views of such a man may be on his \
- first entering a neighbourhood, this truth is so well fixed in the \
- minds of the surrounding families, that he is considered the \
+ first entering a neighbourhood, this truth is so well fixed in \
+ the minds of the surrounding families, that he is considered the \
rightful property of some one or other of their daughters."), \
'encipher'), \
translist={(2, 0, 5, 3, 1, 4, 6): ['encipher'], \
"It is a truth universally acknowledged, that a single man in \
possession of a good fortune, must be in want of a wife. However \
little known the feelings or views of such a man may be on his \
- first entering a neighbourhood, this truth is so well fixed in the \
- minds of the surrounding families, that he is considered the \
+ first entering a neighbourhood, this truth is so well fixed in \
+ the minds of the surrounding families, that he is considered the \
rightful property of some one or other of their daughters."), \
'encipher'), \
translist={(2, 0, 5, 3, 1, 4, 6): ['encipher'], \
(((2, 0, 5, 3, 1, 4, 6), False, False), -997.0129085...)
"""
with Pool() as pool:
- helper_args = [(message, trans, fillcolumnwise, emptycolumnwise,
- fitness)
- for trans in translist.keys()
+ helper_args = [(message, trans, fillcolumnwise, emptycolumnwise,
+ fitness)
+ for trans in translist.keys()
for fillcolumnwise in [True, False]
for emptycolumnwise in [True, False]]
- # Gotcha: the helper function here needs to be defined at the top level
+ # Gotcha: the helper function here needs to be defined at the top level
# (limitation of Pool.starmap)
- breaks = pool.starmap(column_transposition_break_worker,
- helper_args, chunksize)
+ breaks = pool.starmap(column_transposition_break_worker,
+ helper_args, chunksize)
return max(breaks, key=lambda k: k[1])
column_transposition_break = column_transposition_break_mp
-def column_transposition_break_worker(message, transposition,
+def column_transposition_break_worker(message, transposition,
fillcolumnwise, emptycolumnwise, fitness):
- plaintext = column_transposition_decipher(message, transposition,
+ plaintext = column_transposition_decipher(message, transposition,
fillcolumnwise=fillcolumnwise, emptycolumnwise=emptycolumnwise)
fit = fitness(sanitise(plaintext))
logger.debug('Column transposition break attempt using key {0} '
"It is a truth universally acknowledged, that a single man in \
possession of a good fortune, must be in want of a wife. However \
little known the feelings or views of such a man may be on his \
- first entering a neighbourhood, this truth is so well fixed in the \
- minds of the surrounding families, that he is considered the \
+ first entering a neighbourhood, this truth is so well fixed in \
+ the minds of the surrounding families, that he is considered the \
rightful property of some one or other of their daughters."), \
5)) # doctest: +ELLIPSIS
(5, -709.4646722...)
"It is a truth universally acknowledged, that a single man in \
possession of a good fortune, must be in want of a wife. However \
little known the feelings or views of such a man may be on his \
- first entering a neighbourhood, this truth is so well fixed in the \
- minds of the surrounding families, that he is considered the \
+ first entering a neighbourhood, this truth is so well fixed in \
+ the minds of the surrounding families, that he is considered the \
rightful property of some one or other of their daughters."), \
5), \
fitness=Ptrigrams) # doctest: +ELLIPSIS
(5, -997.0129085...)
"""
with Pool() as pool:
- helper_args = [(message, trans, False, True, fitness)
- for trans in
- [[col for col in range(math.ceil(len(message)/rows))]
+ helper_args = [(message, trans, False, True, fitness)
+ for trans in
+ [[col for col in range(math.ceil(len(message)/rows))]
for rows in range(1,max_key_length+1)]]
- # Gotcha: the helper function here needs to be defined at the top level
+ # Gotcha: the helper function here needs to be defined at the top level
# (limitation of Pool.starmap)
- breaks = pool.starmap(column_transposition_break_worker,
- helper_args, chunksize)
- best = max(breaks, key=lambda k: k[1])
+ breaks = pool.starmap(column_transposition_break_worker,
+ helper_args, chunksize)
+ best = max(breaks, key=lambda k: k[1])
return math.trunc(len(message) / len(best[0][0])), best[1]
scytale_break = scytale_break_mp
-def vigenere_keyword_break_mp(message, wordlist=keywords, fitness=Pletters,
- chunksize=500):
- """Breaks a vigenere cipher using a dictionary and
- frequency analysis
+def vigenere_keyword_break_mp(message, wordlist=keywords, fitness=Pletters,
+ chunksize=500):
+ """Breaks a vigenere cipher using a dictionary and frequency analysis.
>>> vigenere_keyword_break_mp(vigenere_encipher(sanitise('this is a test ' \
'message for the vigenere decipherment'), 'cat'), \
('cat', -52.947271216...)
"""
with Pool() as pool:
- helper_args = [(message, word, fitness)
+ helper_args = [(message, word, fitness)
for word in wordlist]
- # Gotcha: the helper function here needs to be defined at the top level
+ # Gotcha: the helper function here needs to be defined at the top level
# (limitation of Pool.starmap)
- breaks = pool.starmap(vigenere_keyword_break_worker, helper_args, chunksize)
+ breaks = pool.starmap(vigenere_keyword_break_worker, helper_args,
+ chunksize)
return max(breaks, key=lambda k: k[1])
vigenere_keyword_break = vigenere_keyword_break_mp
plaintext = vigenere_decipher(message, keyword)
fit = fitness(plaintext)
logger.debug('Vigenere keyword break attempt using key {0} gives fit of '
- '{1} and decrypt starting: {2}'.format(keyword,
+ '{1} and decrypt starting: {2}'.format(keyword,
fit, sanitise(plaintext)[:50]))
return keyword, fit
fit = fitness(plaintext)
return key, fit
sanitised_message = sanitise(message)
- results = starmap(worker, [(sanitised_message, i, fitness)
- for i in range(1, max_key_length+1)])
+ results = starmap(worker, [(sanitised_message, i, fitness)
+ for i in range(1, max_key_length+1)])
return max(results, key=lambda k: k[1])
"""
def worker(message, key_length, fitness):
splits = every_nth(sanitised_message, key_length)
- key = ''.join([chr(-caesar_break(s)[0] % 26 + ord('a')) for s in splits])
+ key = ''.join([chr(-caesar_break(s)[0] % 26 + ord('a'))
+ for s in splits])
plaintext = beaufort_decipher(message, key)
fit = fitness(plaintext)
return key, fit
sanitised_message = sanitise(message)
- results = starmap(worker, [(sanitised_message, i, fitness)
- for i in range(1, max_key_length+1)])
+ results = starmap(worker, [(sanitised_message, i, fitness)
+ for i in range(1, max_key_length+1)])
return max(results, key=lambda k: k[1])
+"""Define a variety of norms for finding distances between vectors"""
+
import collections
-from math import log10
def normalise(frequencies):
"""Scale a set of frequencies so they sum to one
-
+
>>> sorted(normalise({1: 1, 2: 0}).items())
[(1, 1.0), (2, 0.0)]
>>> sorted(normalise({1: 1, 2: 1}).items())
[(1, 0.25), (2, 0.5), (3, 0.25)]
"""
length = sum([f for f in frequencies.values()])
- return collections.defaultdict(int, ((k, v / length)
+ return collections.defaultdict(int, ((k, v / length)
for (k, v) in frequencies.items()))
def euclidean_scale(frequencies):
"""Scale a set of frequencies so they have a unit euclidean length
-
+
>>> sorted(euclidean_scale({1: 1, 2: 0}).items())
[(1, 1.0), (2, 0.0)]
>>> sorted(euclidean_scale({1: 1, 2: 1}).items()) # doctest: +ELLIPSIS
[(1, 0.408248...), (2, 0.81649658...), (3, 0.408248...)]
"""
length = sum([f ** 2 for f in frequencies.values()]) ** 0.5
- return collections.defaultdict(int, ((k, v / length)
+ return collections.defaultdict(int, ((k, v / length)
for (k, v) in frequencies.items()))
def identity_scale(frequencies):
+ """Don't scale a set of frequencies. (For use when a function expects a
+ scaling function but you don't want to supply one.)
+ """
return frequencies
def l2(frequencies1, frequencies2):
- """Finds the distances between two frequency profiles, expressed as dictionaries.
+ """Finds the distances between two frequency profiles, expressed as
+ dictionaries.
Assumes every key in frequencies1 is also in frequencies2
-
+
>>> l2({'a':1, 'b':1, 'c':1}, {'a':1, 'b':1, 'c':1})
0.0
>>> l2({'a':2, 'b':2, 'c':2}, {'a':1, 'b':1, 'c':1}) # doctest: +ELLIPSIS
euclidean_distance = l2
def l1(frequencies1, frequencies2):
- """Finds the distances between two frequency profiles, expressed as
+ """Finds the distances between two frequency profiles, expressed as
dictionaries. Assumes every key in frequencies1 is also in frequencies2
>>> l1({'a':1, 'b':1, 'c':1}, {'a':1, 'b':1, 'c':1})
return total
def l3(frequencies1, frequencies2):
- """Finds the distances between two frequency profiles, expressed as
+ """Finds the distances between two frequency profiles, expressed as
dictionaries. Assumes every key in frequencies1 is also in frequencies2
>>> l3({'a':1, 'b':1, 'c':1}, {'a':1, 'b':1, 'c':1})
return total ** (1/3)
def geometric_mean(frequencies1, frequencies2):
- """Finds the geometric mean of the absolute differences between two frequency profiles,
- expressed as dictionaries.
+ """Finds the geometric mean of the absolute differences between two
+ frequency profiles, expressed as dictionaries.
Assumes every key in frequencies1 is also in frequencies2
-
+
>>> geometric_mean({'a':2, 'b':2, 'c':2}, {'a':1, 'b':1, 'c':1})
1
>>> geometric_mean({'a':2, 'b':2, 'c':2}, {'a':1, 'b':1, 'c':1})
return total
def harmonic_mean(frequencies1, frequencies2):
- """Finds the harmonic mean of the absolute differences between two frequency profiles,
- expressed as dictionaries.
+ """Finds the harmonic mean of the absolute differences between two
+ frequency profiles, expressed as dictionaries.
Assumes every key in frequencies1 is also in frequencies2
>>> harmonic_mean({'a':2, 'b':2, 'c':2}, {'a':1, 'b':1, 'c':1})
def cosine_similarity(frequencies1, frequencies2):
- """Finds the distances between two frequency profiles, expressed as dictionaries.
+ """Finds the distances between two frequency profiles, expressed as
+ dictionaries.
Assumes every key in frequencies1 is also in frequencies2
>>> cosine_similarity({'a':1, 'b':1, 'c':1}, {'a':1, 'b':1, 'c':1}) # doctest: +ELLIPSIS