+import collections
+import random
+
+# Accumulated results: scoring-function name -> message length -> number of
+# trials in which the key was correctly recovered.
+scores = {}
+
+
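+# Build a closure that scores a text by comparing its scaled letter
+# frequencies against the corpus frequencies under the given metric.
+# `invert` negates distance-style metrics, so a higher score always
+# means a closer match.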
+def make_frequency_compare_function(target_frequency, frequency_scaling, metric, invert):
+    def frequency_compare(text):
+        counts = frequency_scaling(frequencies(text))
+        if invert:
+            score = -1 * metric(target_frequency, counts)
+        else:
+            score = metric(target_frequency, counts)
+        return score
+    return frequency_compare
+
+
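+# Cross every metric with every frequency scaling, and append the Pletters
+# scorer as a baseline. `metrics` and `scalings` are assumed to be defined
+# elsewhere in this module as lists of dicts with the keys used below.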
+def scoring_functions():
+    return [{'func': make_frequency_compare_function(s['corpus_frequency'],
+                                                     s['scaling'], m['func'], m['invert']),
+             'name': '{} + {}'.format(m['name'], s['name'])}
+            for m in metrics
+            for s in scalings] + [{'func': Pletters, 'name': 'Pletters'}]
+
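+# Evaluate every scoring function at every message length.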
+def eval_scores():
+    for f in scoring_functions():
+        for l in message_lengths:
+            eval_one_score(f, l)
+
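+# Run `trials` experiments for one scoring function and message length:
+# take a random sample of the corpus, encipher it with a random key, and
+# count how often caesar_break recovers that key.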
+def eval_one_score(scoring_function, message_length):
+    print(scoring_function['name'], message_length)
+    if scoring_function['name'] not in scores:
+        scores[scoring_function['name']] = collections.defaultdict(int)
+    for _ in range(trials):
+        sample_start = random.randint(0, corpus_length - message_length)
+        sample = corpus[sample_start:(sample_start + message_length)]
+        key = random.randint(1, 25)
+        ciphertext = caesar_encipher(sample, key)
+        found_key, _ = caesar_break(ciphertext, scoring_function['func'])
+        if found_key == key:
+            scores[scoring_function['name']][message_length] += 1
+    return scores[scoring_function['name']][message_length]
+
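+# Write the success rate of each scoring function at each message length
+# as a CSV table: one row per scoring function, one column per length.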
+def show_results():
+    with open('caesar_break_parameter_trials.csv', 'w') as f:
+        print(',message_length', file=f)
+        print('scoring,', ', '.join([str(l) for l in message_lengths]), file=f)
+        for scoring in sorted(scores):
+            print(scoring, end='', file=f)
+            for l in message_lengths:
+                print(',', scores[scoring][l] / trials, end='', file=f)
+            print('', file=f)
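+
+
+if __name__ == '__main__':
+    # Minimal driver sketch; assumes `metrics`, `scalings`, `message_lengths`,
+    # `trials`, `corpus`, and `corpus_length` are defined earlier in this file.
+    eval_scores()
+    show_results()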