All tests now running and passing
author Neil Smith <neil.git@njae.me.uk>
Tue, 6 Mar 2018 16:50:55 +0000 (16:50 +0000)
committer Neil Smith <neil.git@njae.me.uk>
Tue, 6 Mar 2018 16:50:55 +0000 (16:50 +0000)
20 files changed:
cipher/affine.py
cipher/amsco.py
cipher/autokey.py
cipher/bifid.py
cipher/cadenus.py
cipher/caesar.py
cipher/column_transposition.py
cipher/hill.py
cipher/keyword_cipher.py
cipher/pocket_enigma.py
cipher/polybius.py
cipher/railfence.py
cipher/vigenere.py
run_tests [new file with mode: 0755]
support/language_models.py
support/lettercount.py
support/make-cracking-dictionary.py
support/text_prettify.py
support/utilities.py
test/test_doctests.py

index 6ba90e6d1e4976f4d40f7024fc5f1bc10647b17f..8a9e5845a1afb784d0b1ee607a5a31dd5ce7d1e3 100644 (file)
@@ -1,5 +1,5 @@
-from utilities import *
-from language_models import *
+from support.utilities import *
+from support.language_models import *
 from logger import logger
 
 
index 4eeeb5961cde67a55a6b28229dfccd729a8d9bcb..3d4e49b6ac1df23f8144de3e9a9da48e875b80aa 100644 (file)
@@ -2,9 +2,9 @@ from enum import Enum
 import multiprocessing 
 import itertools
 
-from utilities import *
-from language_models import *
-from column_transposition import transpositions
+from support.utilities import *
+from support.language_models import *
+from cipher.column_transposition import transpositions, transpositions_of
 
 from logger import logger
 
index b84f0a94c08c1acefc4cece55054cab15ce5b8f3..ffdc3a95d96bbe0e972d19710a0a5934a4591e3f 100644 (file)
@@ -1,6 +1,7 @@
-from utilities import *
-from language_models import *
 import multiprocessing 
+from support.utilities import *
+from support.language_models import *
+from cipher.caesar import caesar_encipher_letter, caesar_decipher_letter
 
 from logger import logger
 
index 4a33456414fc7ee9d0302a285b1117387dd04e2e..478b239ae4b1e520f8f9bfca40c210016c87ba17 100644 (file)
@@ -1,7 +1,7 @@
 import multiprocessing 
-from utilities import *
-from language_models import *
-from keyword_cipher import KeywordWrapAlphabet
+from support.utilities import *
+from support.language_models import *
+from cipher.keyword_cipher import KeywordWrapAlphabet, keyword_cipher_alphabet_of
 
 from logger import logger
 
index 0d3b4156e53d86739cb2648c38834880cab2fb96..1e569affc60293feff3ed193678dc6e192148e92 100644 (file)
@@ -1,6 +1,7 @@
-from utilities import *
-from language_models import *
 from itertools import chain
+from support.utilities import *
+from support.language_models import *
+from cipher.column_transposition import transpositions_of
 
 from logger import logger
 
index 4d9a7220320067d372e53275011c4d1a8283ec44..ec878fcf2fe941d029853d0bed103e9250925054 100644 (file)
@@ -1,5 +1,6 @@
-from utilities import *
-from language_models import *
+from support.utilities import *
+from support.language_models import *
+
 from logger import logger
 
 def caesar_encipher_letter(accented_letter, shift):
index a141ff2c1f8fcc263a43884f307bfdb3f8003d8a..7e0fc28a3d002208ccfd0ea0c6958216aa61fa09 100644 (file)
@@ -1,7 +1,8 @@
-from utilities import *
-from language_models import *
+import math
 import multiprocessing 
 from itertools import chain
+from support.utilities import *
+from support.language_models import *
 
 from logger import logger
 
@@ -254,7 +255,7 @@ def scytale_break_mp(message, max_key_length=20,
         fitness=Ptrigrams) # doctest: +ELLIPSIS
     (5, -997.0129085...)
     """
-    with Pool() as pool:
+    with multiprocessing.Pool() as pool:
         helper_args = [(message, trans, False, True, fitness)
             for trans in
                 [[col for col in range(math.ceil(len(message)/rows))]
index 75048f9a52ddd3840ef19d614455de848f314153..24bac7f986f3346f028e516bd69832a45ba015a8 100644 (file)
@@ -1,9 +1,10 @@
-from utilities import *
-from language_models import *
 import multiprocessing
 import numpy as np
 from numpy import matrix
 from numpy import linalg
+from support.utilities import *
+from support.language_models import *
+from cipher.affine import modular_division_table
 
 from logger import logger
 
index 68c8904a6db9a19758d5e332c1374eaa27eb0382..30fd256c3d44967e6d01d23ab28b13b219ab6dba 100644 (file)
@@ -1,8 +1,8 @@
-from utilities import *
-from language_models import *
 from enum import Enum
 # from itertools import starmap
 import multiprocessing
+from support.utilities import *
+from support.language_models import *
 
 from logger import logger
 
index 557bda3c1e655ae720d106afd36acde0bc12bf0a..a51955acad335d9409aca193fe39b7e9eb15e487 100644 (file)
@@ -1,5 +1,5 @@
-from utilities import *
-from language_models import *
+from support.utilities import *
+from support.language_models import *
 
 from logger import logger
 
index 79aeb386a5f2920893a83a2f2cef8594a9439080..965c3bba2441fc810733d7c2eae82955576d2c76 100644 (file)
@@ -1,8 +1,7 @@
-from utilities import *
-from language_models import *
 import multiprocessing 
-
-from keyword_cipher import KeywordWrapAlphabet
+from support.utilities import *
+from support.language_models import *
+from cipher.keyword_cipher import KeywordWrapAlphabet, keyword_cipher_alphabet_of
 
 from logger import logger
 
index 78154aaff905f994b211f4041cd48838e6239ed6..7d6ac31164cf955dc0b3f87b20e98d23409f2e8a 100644 (file)
@@ -1,8 +1,9 @@
-from utilities import *
-from language_models import *
+import math
 from enum import Enum
-from itertools import starmap
-from itertools import zip_longest
+from itertools import starmap, zip_longest
+from support.utilities import *
+from support.language_models import *
+
 
 from logger import logger
 
index dcf4a2bd2919021f0c21acc693886677b4f61eaa..f54fe6f71afe94b505d3af4481a9bf9f1a41ba61 100644 (file)
@@ -1,8 +1,8 @@
-from utilities import *
-from language_models import *
 from enum import Enum
 from itertools import starmap, cycle
 import multiprocessing
+from support.utilities import *
+from support.language_models import *
 
 from logger import logger
 
diff --git a/run_tests b/run_tests
new file mode 100755 (executable)
index 0000000..42a8108
--- /dev/null
+++ b/run_tests
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+python3 -m unittest discover test
index 53a383dbbaac1233a7189243eaab6c16b4ab11d9..791d0836dbc85da75bf4004f64fc31f4710a8531 100644 (file)
@@ -1,12 +1,11 @@
 import string
 import random
 import collections
-import unicodedata
 import itertools
 from math import log10
 import os 
 
-import norms
+import support.norms
 
 def datafile(name, sep='\t'):
     """Read key,value pairs from file.
@@ -17,13 +16,13 @@ def datafile(name, sep='\t'):
             yield [splits[0], int(splits[1])]
 
 english_counts = collections.Counter(dict(datafile('count_1l.txt')))
-normalised_english_counts = norms.normalise(english_counts)
+normalised_english_counts = support.norms.normalise(english_counts)
 
 english_bigram_counts = collections.Counter(dict(datafile('count_2l.txt')))
-normalised_english_bigram_counts = norms.normalise(english_bigram_counts)
+normalised_english_bigram_counts = support.norms.normalise(english_bigram_counts)
 
 english_trigram_counts = collections.Counter(dict(datafile('count_3l.txt')))
-normalised_english_trigram_counts = norms.normalise(english_trigram_counts)
+normalised_english_trigram_counts = support.norms.normalise(english_trigram_counts)
 
 with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'words.txt'), 'r') as f:
     keywords = [line.rstrip() for line in f]
@@ -112,9 +111,9 @@ def cosine_distance_score(text):
     >>> cosine_distance_score('abcabc') # doctest: +ELLIPSIS
     0.73777...
     """
-    # return norms.cosine_distance(english_counts, 
+    # return support.norms.cosine_distance(english_counts, 
     #     collections.Counter(sanitise(text)))
-    return 1 - norms.cosine_similarity(english_counts, 
+    return 1 - support.norms.cosine_similarity(english_counts, 
         collections.Counter(sanitise(text)))
 
 
index 4a7082d1068669762d1c8526c761382d07ed6182..c095cefb6a67ab9a9e21d97f85b0370c3a66e0fa 100644 (file)
@@ -1,8 +1,9 @@
 import collections
 import string
+from utilities import sanitise
 
-def sanitise(text):
-    return [l.lower() for l in text if l in string.ascii_letters]
+def sanitise(text):
+    return [l.lower() for l in text if l in string.ascii_letters]
 
 corpora = ['shakespeare.txt', 'sherlock-holmes.txt', 'war-and-peace.txt']
 counts = collections.defaultdict(int)
index 37de917608fb63b9f730fcf04cfc8c86035e7e58..5d84e61d4b63e294bfdbf39c8d6e9b8de3420841 100644 (file)
@@ -1,4 +1,5 @@
-import language_models
+# import language_models
+import utilities
 
 american = set(open('/usr/share/dict/american-english', 'r').readlines())
 british = set(open('/usr/share/dict/british-english', 'r').readlines())
@@ -11,7 +12,7 @@ words = american | british | cracklib
 # for w in words:
     # sanitised_words.add(language_models.sanitise(w))
     
-sanitised_words = set(language_models.sanitise(w) for w in words)
+sanitised_words = set(utilities.sanitise(w) for w in words)
 
 sanitised_words.discard('')
 
index d3a6ffa6e008f8ab72c8764a5b33d03bb00b81a0..60963f8ce712a6940ecf53d0314c620d04cd7a6c 100644 (file)
@@ -1,6 +1,6 @@
-from segment import segment
-from utilities import cat, sanitise
 import string
+from support.segment import segment
+from support.utilities import cat, sanitise
 
 
 def tpack(text, width=100):
index ca984a30f96e4b09545eb09b277a5b0dc188be28..3125a9436dbad1d8ec7738f7b347aeef8389fa30 100644 (file)
@@ -1,5 +1,6 @@
 import string
 import collections
+import unicodedata
 from itertools import zip_longest
 
 # join a a list of letters into a string
index 31f2a17cdd50627f46484cc9f762d871b6c5619b..0af5050de483c8e34b7a274d2743925e62dd4faf 100644 (file)
@@ -1,7 +1,33 @@
 import unittest
 import doctest
-import cipher
+
+import cipher.caesar
+import cipher.affine
+import cipher.keyword_cipher
+import cipher.polybius
+import cipher.column_transposition
+import cipher.railfence
+import cipher.cadenus
+import cipher.hill
+import cipher.amsco
+import cipher.bifid
+import cipher.autokey
+import cipher.pocket_enigma
+
 
 def load_tests(loader, tests, ignore):
-    tests.addTests(doctest.DocTestSuite(cipher))
+
+    tests.addTests(doctest.DocTestSuite(cipher.caesar))
+    tests.addTests(doctest.DocTestSuite(cipher.affine))
+    tests.addTests(doctest.DocTestSuite(cipher.keyword_cipher))
+    tests.addTests(doctest.DocTestSuite(cipher.polybius))
+    tests.addTests(doctest.DocTestSuite(cipher.column_transposition))
+    tests.addTests(doctest.DocTestSuite(cipher.railfence))
+    tests.addTests(doctest.DocTestSuite(cipher.cadenus))
+    tests.addTests(doctest.DocTestSuite(cipher.hill))
+    tests.addTests(doctest.DocTestSuite(cipher.amsco))
+    tests.addTests(doctest.DocTestSuite(cipher.bifid))
+    tests.addTests(doctest.DocTestSuite(cipher.autokey))
+    tests.addTests(doctest.DocTestSuite(cipher.pocket_enigma, 
+        extraglobs={'pe': cipher.pocket_enigma.PocketEnigma(1, 'a')}))
     return tests