-scores = collections.defaultdict(int)
-for metric in range(len(metrics)):
- scores[metric_names[metric]] = collections.defaultdict(int)
- for corpus_freqency in range(len(corpus_frequencies)):
- scores[metric_names[metric]][corpus_frequency_names[corpus_freqency]] = collections.defaultdict(int)
- for scaling in range(len(scalings)):
- scores[metric_names[metric]][corpus_frequency_names[corpus_freqency]][scaling_names[scaling]] = collections.defaultdict(int)
- for message_length in message_lengths:
- for i in range(trials):
- sample_start = random.randint(0, corpus_length - message_length)
- sample = corpus[sample_start:(sample_start + message_length)]
- key = random.randint(1, 25)
- sample_ciphertext = caesar_encipher(sample, key)
- (found_key, score) = caesar_break(sample_ciphertext,
- metric=metrics[metric],
- target_frequencies=corpus_frequencies[corpus_freqency],
- message_frequency_scaling=scalings[scaling])
- if found_key == key:
- scores[metric_names[metric]][corpus_frequency_names[corpus_freqency]][scaling_names[scaling]][message_length] += 1
- print(', '.join([metric_names[metric],
- corpus_frequency_names[corpus_freqency],
- scaling_names[scaling],
- str(message_length),
- str(scores[metric_names[metric]][corpus_frequency_names[corpus_freqency]][scaling_names[scaling]][message_length] / trials) ]))
-
-
-with open('caesar_break_parameter_trials.csv', 'w') as f:
- for metric in range(len(metrics)):
- for corpus_freqency in range(len(corpus_frequencies)):
- for scaling in range(len(scalings)):
- for message_length in message_lengths:
- print(', '.join([metric_names[metric],
- corpus_frequency_names[corpus_freqency],
- scaling_names[scaling],
- str(message_length),
- str(scores[metric_names[metric]][corpus_frequency_names[corpus_freqency]][scaling_names[scaling]][message_length] / trials) ]),
- file=f)
-
-
\ No newline at end of file
# Success counts for each trial configuration, keyed by scoring-function
# name and then by message length; populated by eval_one_score().
scores = {}
+
+
def make_frequency_compare_function(target_frequency, frequency_scaling, metric, invert):
    """Build a scoring closure that rates a text by comparing its scaled
    letter frequencies against *target_frequency* using *metric*.

    If *invert* is true the metric's value is negated, so that for
    distance-like metrics a higher score still means a better match.
    Returns a function mapping text -> numeric score.
    """
    def frequency_compare(text):
        scaled_counts = frequency_scaling(frequencies(text))
        raw = metric(target_frequency, scaled_counts)
        return -raw if invert else raw
    return frequency_compare
+
+
def scoring_functions():
    """Return every scoring function to trial.

    One entry per (metric, scaling) pair — built via
    make_frequency_compare_function — plus the plain Pletters
    log-probability scorer. Each entry is a dict with 'func' and 'name'.
    """
    candidates = []
    for metric_spec in metrics:
        for scaling_spec in scalings:
            scorer = make_frequency_compare_function(
                scaling_spec['corpus_frequency'],
                scaling_spec['scaling'],
                metric_spec['func'],
                metric_spec['invert'])
            candidates.append(
                {'func': scorer,
                 'name': '{} + {}'.format(metric_spec['name'], scaling_spec['name'])})
    candidates.append({'func': Pletters, 'name': 'Pletters'})
    return candidates
+
def eval_scores():
    """Run eval_one_score for every scoring function at every message
    length, filling in the module-level `scores` table.

    Fix: the original used a list comprehension purely for its side
    effects, building and discarding a throwaway list; plain loops are
    the idiomatic form for side-effecting iteration.
    """
    for scorer in scoring_functions():
        for length in message_lengths:
            eval_one_score(scorer, length)
+
def eval_one_score(scoring_function, message_length):
    """Estimate how often caesar_break recovers the true key.

    Runs `trials` experiments: each picks a random corpus sample of
    `message_length` characters, enciphers it with a random key, and
    counts a success when caesar_break (driven by scoring_function's
    'func') finds that key. Updates the module-level `scores` table
    under scoring_function's 'name' and returns the hit count for this
    (scorer, length) cell.
    """
    name = scoring_function['name']
    print(name, message_length)
    if name not in scores:
        scores[name] = collections.defaultdict(int)
    tally = scores[name]
    for _ in range(trials):
        start = random.randint(0, corpus_length - message_length)
        plaintext = corpus[start:start + message_length]
        true_key = random.randint(1, 25)
        recovered_key, _ = caesar_break(caesar_encipher(plaintext, true_key),
                                        scoring_function['func'])
        if recovered_key == true_key:
            tally[message_length] += 1
    return tally[message_length]
+
def show_results():
    """Write the success-rate table to caesar_break_parameter_trials.csv.

    Layout: a two-line header, then one row per scoring function with
    one fraction (hits / trials) per message length.

    Fix: the original wrapped the row-printing code in a redundant
    `for length in message_lengths:` loop whose variable was never used,
    so every scoring row was written len(message_lengths) times.
    """
    with open('caesar_break_parameter_trials.csv', 'w') as f:
        print(',message_length', file = f)
        print('scoring,', ', '.join([str(l) for l in message_lengths]), file = f)
        for scoring in sorted(scores.keys()):
            print(scoring, end='', sep='', file=f)
            for l in message_lengths:
                print(',', scores[scoring][l] / trials, end='', file=f)
            print('', file = f)
+
# Guard the script entry point so importing this module for its helpers
# doesn't kick off the (slow) full parameter sweep. Behavior when run
# directly is unchanged.
if __name__ == '__main__':
    eval_scores()
    show_results()