-from utilities import *
-from language_models import *
+from support.utilities import *
+from support.language_models import *
from logger import logger
import multiprocessing
import itertools
-from utilities import *
-from language_models import *
-from column_transposition import transpositions
+from support.utilities import *
+from support.language_models import *
+from cipher.column_transposition import transpositions, transpositions_of
from logger import logger
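# Note on the new export above: transpositions_of derives a column order
# from a keyword, which callers now import explicitly instead of relying
# on a star import. A minimal sketch of the idea (illustrative only;
# cipher/column_transposition.py is the authoritative version):
def transpositions_of_sketch(keyword):
    # Sort (letter, position) pairs alphabetically; the stable sort keeps
    # repeated letters in left-to-right order. Read off original positions.
    return tuple(pos for _, pos in sorted((l, i) for i, l in enumerate(keyword)))

# transpositions_of_sketch('cat') == (1, 0, 2): the 'a' column reads first, then 'c', then 't'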
-from utilities import *
-from language_models import *
import multiprocessing
+from support.utilities import *
+from support.language_models import *
+from cipher.caesar import caesar_encipher_letter, caesar_decipher_letter
from logger import logger
import multiprocessing
-from utilities import *
-from language_models import *
-from keyword_cipher import KeywordWrapAlphabet
+from support.utilities import *
+from support.language_models import *
+from cipher.keyword_cipher import KeywordWrapAlphabet, keyword_cipher_alphabet_of
from logger import logger
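# keyword_cipher_alphabet_of builds the substitution alphabet that the
# KeywordWrapAlphabet enum parameterises. A minimal sketch of the plain
# wrap-from-a mode (an assumption: the packaged version also supports
# wrapping from the keyword's last or largest letter):
import string

def keyword_alphabet_sketch(keyword):
    deduped = ''.join(sorted(set(keyword), key=keyword.index))  # first occurrences only
    rest = ''.join(l for l in string.ascii_lowercase if l not in deduped)
    return deduped + rest

# keyword_alphabet_sketch('bayes') == 'bayescdfghijklmnopqrtuvwxz'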
-from utilities import *
-from language_models import *
from itertools import chain
+from support.utilities import *
+from support.language_models import *
+from cipher.column_transposition import transpositions_of
from logger import logger
-from utilities import *
-from language_models import *
+from support.utilities import *
+from support.language_models import *
+
from logger import logger
def caesar_encipher_letter(accented_letter, shift):
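# For context on the signature above: the parameter name (and unicodedata
# moving into the utilities module below) suggests the letter is stripped
# of any accent before shifting. A sketch of the core arithmetic
# (illustrative; cipher/caesar.py handles the doctested edge cases):
import string
import unicodedata

def caesar_shift_sketch(accented_letter, shift):
    letter = unicodedata.normalize('NFD', accented_letter)[0]  # drop combining marks
    if letter in string.ascii_letters:
        base = ord('a') if letter.islower() else ord('A')
        return chr((ord(letter) - base + shift) % 26 + base)
    return letter

# caesar_shift_sketch('é', 1) == 'f'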
-from utilities import *
-from language_models import *
+import math
import multiprocessing
from itertools import chain
+from support.utilities import *
+from support.language_models import *
from logger import logger
fitness=Ptrigrams) # doctest: +ELLIPSIS
(5, -997.0129085...)
"""
- with Pool() as pool:
+ with multiprocessing.Pool() as pool:
helper_args = [(message, trans, False, True, fitness)
for trans in
[[col for col in range(math.ceil(len(message)/rows))]
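# Two things worth noting in the hunk above: Pool was previously reachable
# only through the star import, so it must now be qualified as
# multiprocessing.Pool; and the transposition being built is the identity
# over ceil(len(message)/rows) columns, which is how a scytale of `rows`
# rows is expressed as a column transposition. For a 20-letter message:
import math
message_length, rows = 20, 4
identity = [col for col in range(math.ceil(message_length / rows))]
# identity == [0, 1, 2, 3, 4]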
-from utilities import *
-from language_models import *
import multiprocessing
import numpy as np
from numpy import matrix
from numpy import linalg
+from support.utilities import *
+from support.language_models import *
+from cipher.affine import modular_division_table
from logger import logger
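# cipher.hill imports modular_division_table from cipher.affine to invert
# the key matrix mod 26. A sketch of what such a table might hold,
# assuming a (divisor, dividend) -> quotient mapping (the packaged version
# may be indexed differently):
division_table_sketch = {}
for a in range(26):
    for c in range(26):
        division_table_sketch[(a, (a * c) % 26)] = c

# division_table_sketch[(3, 1)] == 9, since 3 * 9 == 27 == 1 (mod 26)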
-from utilities import *
-from language_models import *
from enum import Enum
# from itertools import starmap
import multiprocessing
+from support.utilities import *
+from support.language_models import *
from logger import logger
-from utilities import *
-from language_models import *
+from support.utilities import *
+from support.language_models import *
from logger import logger
-from utilities import *
-from language_models import *
import multiprocessing
-
-from keyword_cipher import KeywordWrapAlphabet
+from support.utilities import *
+from support.language_models import *
+from cipher.keyword_cipher import KeywordWrapAlphabet, keyword_cipher_alphabet_of
from logger import logger
-from utilities import *
-from language_models import *
+import math
from enum import Enum
-from itertools import starmap
-from itertools import zip_longest
+from itertools import starmap, zip_longest
+from support.utilities import *
+from support.language_models import *
+
from logger import logger
-from utilities import *
-from language_models import *
from enum import Enum
from itertools import starmap, cycle
import multiprocessing
+from support.utilities import *
+from support.language_models import *
from logger import logger
--- /dev/null
+#!/bin/bash
+
+python3 -m unittest discover test
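# (Usage note: run this from the repository root; `discover test` expects
# the unittest modules to live in a test/ directory there.)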
import string
import random
import collections
-import unicodedata
import itertools
from math import log10
import os
-import norms
+import support.norms
def datafile(name, sep='\t'):
"""Read key,value pairs from file.
yield [splits[0], int(splits[1])]
english_counts = collections.Counter(dict(datafile('count_1l.txt')))
-normalised_english_counts = norms.normalise(english_counts)
+normalised_english_counts = support.norms.normalise(english_counts)
english_bigram_counts = collections.Counter(dict(datafile('count_2l.txt')))
-normalised_english_bigram_counts = norms.normalise(english_bigram_counts)
+normalised_english_bigram_counts = support.norms.normalise(english_bigram_counts)
english_trigram_counts = collections.Counter(dict(datafile('count_3l.txt')))
-normalised_english_trigram_counts = norms.normalise(english_trigram_counts)
+normalised_english_trigram_counts = support.norms.normalise(english_trigram_counts)
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'words.txt'), 'r') as f:
keywords = [line.rstrip() for line in f]
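# The support.norms.normalise calls above turn raw letter counts into
# relative frequencies. A minimal sketch of the behaviour these lines rely
# on (support/norms.py is the authoritative version and may normalise
# under a different norm):
import collections

def normalise_sketch(frequencies):
    total = sum(frequencies.values())
    return {k: v / total for k, v in frequencies.items()}

# normalise_sketch(collections.Counter('aab')) == {'a': 2/3, 'b': 1/3}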
>>> cosine_distance_score('abcabc') # doctest: +ELLIPSIS
0.73777...
"""
- # return norms.cosine_distance(english_counts,
+ # return support.norms.cosine_distance(english_counts,
# collections.Counter(sanitise(text)))
- return 1 - norms.cosine_similarity(english_counts,
+ return 1 - support.norms.cosine_similarity(english_counts,
collections.Counter(sanitise(text)))
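# cosine_distance_score is 1 minus the cosine similarity between the
# English letter counts and the message's letter counts, so 0 would mean a
# distribution identical to English. A sketch of the similarity assumed
# here (support/norms.py holds the real implementation):
import math

def cosine_similarity_sketch(freqs1, freqs2):
    numerator = sum(freqs1[k] * freqs2.get(k, 0) for k in freqs1)
    length1 = math.sqrt(sum(v * v for v in freqs1.values()))
    length2 = math.sqrt(sum(v * v for v in freqs2.values()))
    return numerator / (length1 * length2)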
import collections
import string
-def sanitise(text):
-    return [l.lower() for l in text if l in string.ascii_letters]
+from utilities import sanitise
+# def sanitise(text):
+#     return [l.lower() for l in text if l in string.ascii_letters]
corpora = ['shakespeare.txt', 'sherlock-holmes.txt', 'war-and-peace.txt']
counts = collections.defaultdict(int)
-import language_models
+# import language_models
+import utilities
american = set(open('/usr/share/dict/american-english', 'r').readlines())
british = set(open('/usr/share/dict/british-english', 'r').readlines())
# for w in words:
# sanitised_words.add(language_models.sanitise(w))
-sanitised_words = set(language_models.sanitise(w) for w in words)
+sanitised_words = set(utilities.sanitise(w) for w in words)
sanitised_words.discard('')
-from segment import segment
-from utilities import cat, sanitise
import string
+from support.segment import segment
+from support.utilities import cat, sanitise
def tpack(text, width=100):
import string
import collections
+import unicodedata
from itertools import zip_longest
# join a list of letters into a string
import unittest
import doctest
-import cipher
+
+import cipher.caesar
+import cipher.affine
+import cipher.keyword_cipher
+import cipher.polybius
+import cipher.column_transposition
+import cipher.railfence
+import cipher.cadenus
+import cipher.hill
+import cipher.amsco
+import cipher.bifid
+import cipher.autokey
+import cipher.pocket_enigma
+
def load_tests(loader, tests, ignore):
- tests.addTests(doctest.DocTestSuite(cipher))
+
+ tests.addTests(doctest.DocTestSuite(cipher.caesar))
+ tests.addTests(doctest.DocTestSuite(cipher.affine))
+ tests.addTests(doctest.DocTestSuite(cipher.keyword_cipher))
+ tests.addTests(doctest.DocTestSuite(cipher.polybius))
+ tests.addTests(doctest.DocTestSuite(cipher.column_transposition))
+ tests.addTests(doctest.DocTestSuite(cipher.railfence))
+ tests.addTests(doctest.DocTestSuite(cipher.cadenus))
+ tests.addTests(doctest.DocTestSuite(cipher.hill))
+ tests.addTests(doctest.DocTestSuite(cipher.amsco))
+ tests.addTests(doctest.DocTestSuite(cipher.bifid))
+ tests.addTests(doctest.DocTestSuite(cipher.autokey))
+ tests.addTests(doctest.DocTestSuite(cipher.pocket_enigma,
+ extraglobs={'pe': cipher.pocket_enigma.PocketEnigma(1, 'a')}))
return tests
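# With the load_tests protocol above, the bash runner's
# `python3 -m unittest discover test` collects each cipher module's
# doctests alongside the ordinary unittest cases; extraglobs seeds the
# pocket_enigma doctests with a ready-made PocketEnigma instance.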