9 jupytext_version: 1.9.1
11 display_name: Python 3
16 <!-- #region Collapsed="false" -->
* [Office of National Statistics](https://www.ons.gov.uk/peoplepopulationandcommunity/birthsdeathsandmarriages/deaths/datasets/weeklyprovisionalfiguresondeathsregisteredinenglandandwales) (England and Wales). Weeks start on a Saturday.
20 * [Northern Ireland Statistics and Research Agency](https://www.nisra.gov.uk/publications/weekly-deaths) (Northern Ireland). Weeks start on a Saturday. Note that the week numbers don't match the England and Wales data.
21 * [National Records of Scotland](https://www.nrscotland.gov.uk/statistics-and-data/statistics/statistics-by-theme/vital-events/general-publications/weekly-and-monthly-data-on-births-and-deaths/weekly-data-on-births-and-deaths) (Scotland). Note that Scotland uses ISO8601 week numbers, which start on a Monday.
25 ```python Collapsed="false"
31 from scipy.stats import gmean
33 import matplotlib as mpl
34 import matplotlib.pyplot as plt
38 ```python Collapsed="false"
# ONS England & Wales weekly registered-deaths workbook (2020 edition, through week 53).
england_wales_filename = 'uk-deaths-data/publishedweek532020.xlsx'
42 ```python Collapsed="false"
43 raw_data_2015 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2015.csv',
44 parse_dates=[1, 2], dayfirst=True,
# NI 2015: keep only the weekly deaths total (column 2) and label it by year.
dh15i = raw_data_2015.iloc[:, [2]]
dh15i.columns = ['total_2015']
53 ```python Collapsed="false"
54 raw_data_2016 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2016.csv',
55 parse_dates=[1, 2], dayfirst=True,
# NI 2016: keep only the weekly deaths total (column 2) and label it by year.
dh16i = raw_data_2016.iloc[:, [2]]
dh16i.columns = ['total_2016']
64 ```python Collapsed="false"
65 raw_data_2017 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2017.csv',
66 parse_dates=[1, 2], dayfirst=True,
# NI 2017: keep only the weekly deaths total (column 2) and label it by year.
dh17i = raw_data_2017.iloc[:, [2]]
dh17i.columns = ['total_2017']
75 ```python Collapsed="false"
76 raw_data_2018 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2018.csv',
77 parse_dates=[1, 2], dayfirst=True,
# NI 2018: keep only the weekly deaths total (column 2) and label it by year.
dh18i = raw_data_2018.iloc[:, [2]]
dh18i.columns = ['total_2018']
86 ```python Collapsed="false"
87 raw_data_2019 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2019.csv',
88 parse_dates=[1, 2], dayfirst=True,
# NI 2019: keep only the weekly deaths total (column 2) and label it by year.
dh19i = raw_data_2019.iloc[:, [2]]
dh19i.columns = ['total_2019']
97 ```python Collapsed="false"
98 raw_data_2020_i = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2020.csv',
99 parse_dates=[1], dayfirst=True,
# NI 2020: this file's layout differs from earlier years — the total is in column 1.
deaths_headlines_i = raw_data_2020_i.iloc[:, [1]]
deaths_headlines_i.columns = ['total_2020']
deaths_headlines_i.tail()
108 ```python Collapsed="false"
112 ```python Collapsed="false"
116 ```python Collapsed="false"
117 raw_data_s = pd.read_csv('uk-deaths-data/weekly-deaths-scotland.csv',
125 ```python Collapsed="false"
# Scotland: select the per-year columns (most recent year first) and re-label
# them to match the other nations' 'total_YYYY' naming.
# list(...) makes the reversed iterator an explicit column list, and .copy()
# detaches the selection from raw_data_s before the in-place column rename and
# index manipulation below (avoids SettingWithCopy hazards / copy-on-write surprises).
deaths_headlines_s = raw_data_s[list(reversed('2015 2016 2017 2018 2019 2020'.split()))].copy()
deaths_headlines_s.columns = ['total_' + c for c in deaths_headlines_s.columns]
deaths_headlines_s.reset_index(drop=True, inplace=True)
deaths_headlines_s.index = deaths_headlines_s.index + 1  # week numbers are 1-based
133 ```python Collapsed="false"
137 ```python Collapsed="false"
138 eng_xls = pd.read_excel(england_wales_filename,
139 sheet_name="Weekly figures 2020",
140 skiprows=[0, 1, 2, 3],
147 ```python Collapsed="false"
151 ```python Collapsed="false"
# The ONS spreadsheet uses merged header cells, so some column names arrive as
# NaN with the real header text sitting in the first data row.  Backfill the
# missing names from row 0.
eng_xls_columns = list(eng_xls.columns)

for i, c in enumerate(eng_xls_columns):
    # print(i, c, type(c), isinstance(c, float))
    if isinstance(c, float) and np.isnan(c):
        # Only take the row-0 value if it is not a missing timestamp (NaT).
        if eng_xls.iloc[0].iloc[i] is not pd.NaT:
            eng_xls_columns[i] = eng_xls.iloc[0].iloc[i]

# np.isnan(eng_xls_columns[0])

eng_xls.columns = eng_xls_columns
167 ```python Collapsed="false"
168 eng_xls['Total deaths, all ages']
171 ```python Collapsed="false"
172 eng_xls['Wales'].iloc[1:]
175 ```python Collapsed="false"
176 # raw_data_2020 = pd.read_csv('uk-deaths-data/publishedweek272020.csv',
177 # parse_dates=[1], dayfirst=True,
182 ```python Collapsed="false"
186 ```python Collapsed="false"
187 # raw_data_2020.head()
190 ```python Collapsed="false"
191 # raw_data_2020['W92000004', 'Wales']
194 ```python Collapsed="false"
195 raw_data_2019 = pd.read_csv('uk-deaths-data/publishedweek522019.csv',
196 parse_dates=[1], dayfirst=True,
199 # raw_data_2019.head()
202 ```python Collapsed="false"
203 raw_data_2018 = pd.read_csv('uk-deaths-data/publishedweek522018.csv',
204 parse_dates=[1], dayfirst=True,
207 # raw_data_2018.head()
210 ```python Collapsed="false"
211 raw_data_2017 = pd.read_csv('uk-deaths-data/publishedweek522017.csv',
212 parse_dates=[1], dayfirst=True,
215 # raw_data_2017.head()
218 ```python Collapsed="false"
219 raw_data_2016 = pd.read_csv('uk-deaths-data/publishedweek522016.csv',
220 parse_dates=[1], dayfirst=True,
223 # raw_data_2016.head()
226 ```python Collapsed="false"
227 raw_data_2015 = pd.read_csv('uk-deaths-data/publishedweek2015.csv',
228 parse_dates=[1], dayfirst=True,
231 # raw_data_2015.head()
234 ```python Collapsed="false"
# Split the England & Wales workbook into separate national series:
# England = (E&W total) - Wales.  Row 0 is skipped (it holds header spill-over
# from the merged-cell fix-up above).
dhw = eng_xls['Wales'].iloc[1:]
dhe = eng_xls['Total deaths, all ages'].iloc[1:] - dhw
deaths_headlines_e = pd.DataFrame({'total_2020': dhe.dropna()})
deaths_headlines_w = pd.DataFrame({'total_2020': dhw.dropna()})
241 ```python Collapsed="false"
242 # deaths_headlines_e = raw_data_2020.iloc[:, [1]].copy()
243 # deaths_headlines_e.columns = ['total_2020']
244 # deaths_headlines_w = raw_data_2020['W92000004'].copy()
245 # deaths_headlines_e.columns = ['total_2020']
246 # deaths_headlines_w.columns = ['total_2020']
247 # deaths_headlines_e.total_2020 -= deaths_headlines_w.total_2020
248 # deaths_headlines_e.head()
252 ```python Collapsed="false"
# England & Wales 2019 → England-only: subtract the Wales deaths from the
# E&W total.  .copy() detaches each slice from raw_data_2019 before the
# in-place column rename and subtraction (avoids SettingWithCopyWarning and
# silent no-ops under pandas copy-on-write).
dh19e = raw_data_2019.iloc[:, [1]].copy()      # total deaths, all ages (E&W)
dh19w = raw_data_2019['W92000004'].copy()      # Wales (ONS area code W92000004)
dh19e.columns = ['total_2019']
dh19w.columns = ['total_2019']
dh19e.total_2019 -= dh19w.total_2019           # leave England-only totals
261 ```python Collapsed="false"
265 ```python Collapsed="false"
# England & Wales 2018 → England-only (same pattern as the other years).
# .copy() detaches the slices before in-place mutation.
dh18e = raw_data_2018.iloc[:, [1]].copy()      # total deaths, all ages (E&W)
dh18w = raw_data_2018['W92000004'].copy()      # Wales (ONS area code W92000004)
dh18e.columns = ['total_2018']
dh18w.columns = ['total_2018']
dh18e.total_2018 -= dh18w.total_2018           # leave England-only totals
274 ```python Collapsed="false"
# England & Wales 2017 → England-only (same pattern as the other years).
# .copy() detaches the slices before in-place mutation.
dh17e = raw_data_2017.iloc[:, [1]].copy()      # total deaths, all ages (E&W)
dh17w = raw_data_2017['W92000004'].copy()      # Wales (ONS area code W92000004)
dh17e.columns = ['total_2017']
dh17w.columns = ['total_2017']
dh17e.total_2017 -= dh17w.total_2017           # leave England-only totals
283 ```python Collapsed="false"
# England & Wales 2016 → England-only (same pattern as the other years).
# .copy() detaches the slices before in-place mutation.
dh16e = raw_data_2016.iloc[:, [1]].copy()      # total deaths, all ages (E&W)
dh16w = raw_data_2016['W92000004'].copy()      # Wales (ONS area code W92000004)
dh16e.columns = ['total_2016']
dh16w.columns = ['total_2016']
dh16e.total_2016 -= dh16w.total_2016           # leave England-only totals
292 ```python Collapsed="false"
# England & Wales 2015 → England-only (same pattern as the other years).
# .copy() detaches the slices before in-place mutation.
dh15e = raw_data_2015.iloc[:, [1]].copy()      # total deaths, all ages (E&W)
dh15w = raw_data_2015['W92000004'].copy()      # Wales (ONS area code W92000004)
dh15e.columns = ['total_2015']
dh15w.columns = ['total_2015']
dh15e.total_2015 -= dh15w.total_2015           # leave England-only totals
301 ```python Collapsed="false"
302 # dh18 = raw_data_2018.iloc[:, [1, 2]]
303 # dh18.columns = ['total_2018', 'total_previous']
307 ```python Collapsed="false"
# Join each earlier year's England totals onto the 2020 frame, matching on
# week number (the index).
deaths_headlines_e = deaths_headlines_e.merge(dh19e['total_2019'], how='outer', left_index=True, right_index=True)
deaths_headlines_e = deaths_headlines_e.merge(dh18e['total_2018'], how='outer', left_index=True, right_index=True)
deaths_headlines_e = deaths_headlines_e.merge(dh17e['total_2017'], how='outer', left_index=True, right_index=True)
deaths_headlines_e = deaths_headlines_e.merge(dh16e['total_2016'], how='outer', left_index=True, right_index=True)
# deaths_headlines = deaths_headlines.merge(dh15['total_2015'], how='outer', left_index=True, right_index=True)
# NOTE(review): 2015 alone uses how='left' — presumably to drop 2015's extra
# week 53 rather than extend the frame; confirm against the 2015 data.
deaths_headlines_e = deaths_headlines_e.merge(dh15e['total_2015'], how='left', left_index=True, right_index=True)
317 ```python Collapsed="false"
# Rebuild the Scotland frame from raw_data_s and trim it to weeks 1-52 so it
# aligns with the other nations (Scotland publishes ISO-8601 weeks, which can
# include a week 53).
# list(...) makes the reversed iterator an explicit column list, and .copy()
# detaches the selection from raw_data_s before the in-place column rename and
# index manipulation below (avoids SettingWithCopy hazards / copy-on-write surprises).
deaths_headlines_s = raw_data_s[list(reversed('2015 2016 2017 2018 2019 2020'.split()))].copy()
deaths_headlines_s.columns = ['total_' + c for c in deaths_headlines_s.columns]
deaths_headlines_s.reset_index(drop=True, inplace=True)
deaths_headlines_s.index = deaths_headlines_s.index + 1  # week numbers are 1-based
deaths_headlines_s = deaths_headlines_s.loc[1:52]
326 <!-- #region Collapsed="false" -->
327 # Correction for missing data
330 ```python Collapsed="false"
331 # deaths_headlines_s.loc[20, 'total_2020'] = 1000
335 ```python Collapsed="false"
# Join each earlier year's Wales totals onto the 2020 frame, matching on
# week number (the index).
deaths_headlines_w = deaths_headlines_w.merge(dh19w['total_2019'], how='outer', left_index=True, right_index=True)
deaths_headlines_w = deaths_headlines_w.merge(dh18w['total_2018'], how='outer', left_index=True, right_index=True)
deaths_headlines_w = deaths_headlines_w.merge(dh17w['total_2017'], how='outer', left_index=True, right_index=True)
deaths_headlines_w = deaths_headlines_w.merge(dh16w['total_2016'], how='outer', left_index=True, right_index=True)
# deaths_headlines = deaths_headlines.merge(dh15['total_2015'], how='outer', left_index=True, right_index=True)
# NOTE(review): 2015 alone uses how='left' — presumably to drop 2015's extra
# week 53 rather than extend the frame; confirm against the 2015 data.
deaths_headlines_w = deaths_headlines_w.merge(dh15w['total_2015'], how='left', left_index=True, right_index=True)
345 ```python Collapsed="false"
# Join each earlier year's Northern Ireland totals onto the 2020 frame,
# matching on week number (the index).  As with the other nations, 2015 uses
# how='left' — presumably to avoid extending the frame with 2015-only weeks.
deaths_headlines_i = deaths_headlines_i.merge(dh19i['total_2019'], how='outer', left_index=True, right_index=True)
deaths_headlines_i = deaths_headlines_i.merge(dh18i['total_2018'], how='outer', left_index=True, right_index=True)
deaths_headlines_i = deaths_headlines_i.merge(dh17i['total_2017'], how='outer', left_index=True, right_index=True)
deaths_headlines_i = deaths_headlines_i.merge(dh16i['total_2016'], how='outer', left_index=True, right_index=True)
deaths_headlines_i = deaths_headlines_i.merge(dh15i['total_2015'], how='left', left_index=True, right_index=True)
354 ```python Collapsed="false"
358 ```python Collapsed="false"
# UK total = element-wise sum of the four nations, aligned on week number.
# NaN in any one nation propagates, so a UK total exists only for weeks
# present in all four datasets.
deaths_headlines = deaths_headlines_e + deaths_headlines_w + deaths_headlines_i + deaths_headlines_s
363 ```python Collapsed="false"
# Baseline: mean weekly deaths over the five pre-pandemic years (2015-2019).
# DataFrame.mean(axis=1) is equivalent to .apply(np.mean, axis=1) — both skip
# NaN — but runs vectorised instead of row-by-row.
prev_year_cols = 'total_2019 total_2018 total_2017 total_2016 total_2015'.split()
deaths_headlines_e['previous_mean'] = deaths_headlines_e[prev_year_cols].mean(axis=1)
deaths_headlines_w['previous_mean'] = deaths_headlines_w[prev_year_cols].mean(axis=1)
deaths_headlines_s['previous_mean'] = deaths_headlines_s[prev_year_cols].mean(axis=1)
deaths_headlines_i['previous_mean'] = deaths_headlines_i[prev_year_cols].mean(axis=1)
deaths_headlines['previous_mean'] = deaths_headlines[prev_year_cols].mean(axis=1)
372 ```python Collapsed="false"
373 deaths_headlines['total_2020 total_2019 total_2018 total_2017 total_2016 total_2015'.split()].plot(figsize=(14, 8))
376 ```python Collapsed="false"
377 deaths_headlines[['total_2020', 'previous_mean']].plot(figsize=(10, 8))
380 ```python Collapsed="false"
381 deaths_headlines_i.plot()
384 ```python Collapsed="false"
385 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
387 dhna = deaths_headlines.dropna()
389 fig = plt.figure(figsize=(10, 10))
390 ax = fig.add_subplot(111, projection="polar")
394 np.arange(len(dhna))/float(len(dhna))*2.*np.pi),
396 # l15, = ax.plot(theta, deaths_headlines['total_2015'], color="#b56363", label="2015") # 0
397 # l16, = ax.plot(theta, deaths_headlines['total_2016'], color="#a4b563", label="2016") # 72
398 # l17, = ax.plot(theta, deaths_headlines['total_2017'], color="#63b584", label="2017") # 144
399 # l18, = ax.plot(theta, deaths_headlines['total_2018'], color="#6384b5", label="2018") # 216
400 # l19, = ax.plot(theta, deaths_headlines['total_2019'], color="#a4635b", label="2019") # 288
401 l15, = ax.plot(theta, dhna['total_2015'], color="#e47d7d", label="2015") # 0
402 l16, = ax.plot(theta, dhna['total_2016'], color="#afc169", label="2016") # 72 , d0e47d
403 l17, = ax.plot(theta, dhna['total_2017'], color="#7de4a6", label="2017") # 144
404 l18, = ax.plot(theta, dhna['total_2018'], color="#7da6e4", label="2018") # 216
405 l19, = ax.plot(theta, dhna['total_2019'], color="#d07de4", label="2019") # 288
407 lmean, = ax.plot(theta, dhna['previous_mean'], color="black", linestyle='dashed', label="mean")
409 l20, = ax.plot(theta, dhna['total_2020'], color="red", label="2020")
411 # deaths_headlines.total_2019.plot(ax=ax)
413 def _closeline(line):
414 x, y = line.get_data()
415 x = np.concatenate((x, [x[0]]))
416 y = np.concatenate((y, [y[0]]))
419 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
423 ax.set_xticklabels(dhna.index)
425 plt.title("Deaths by week over years, all UK")
426 plt.savefig('deaths-radar.png')
430 <!-- #region Collapsed="false" -->
431 # Excess deaths calculation
434 ```python Collapsed="false"
435 # raw_data_2020.loc[12, 'Week ended']
438 ```python Collapsed="false"
439 eng_xls.loc[12, 'Week ended']
442 ```python Collapsed="false"
443 # raw_data_2020.iloc[-1]['Week ended']
446 ```python Collapsed="false"
447 deaths_headlines_e.total_2020.dropna().last_valid_index()
450 ```python Collapsed="false"
451 eng_xls.loc[deaths_headlines_e.total_2020.dropna().last_valid_index(), 'Week ended']
454 ```python Collapsed="false"
455 eng_xls.loc[27, 'Week ended']
458 ```python Collapsed="false"
459 # raw_data_2020.loc[12].droplevel(1)['Week ended']
462 ```python Collapsed="false"
463 # raw_data_2020.iloc[-1].droplevel(1)['Week ended']
466 ```python Collapsed="false"
467 (deaths_headlines.loc[12:].total_2020 - deaths_headlines.loc[12:].previous_mean).sum()
470 ```python Collapsed="false"
471 (deaths_headlines.loc[12:27].total_2020 - deaths_headlines.loc[12:27].previous_mean).sum()
474 ```python Collapsed="false"
475 deaths_headlines.previous_mean.sum()
478 ```python Collapsed="false"
479 # excess_death_data = {
480 # 'start_date': str(eng_xls.loc[12, 'Week ended']),
481 # 'end_date': str(eng_xls.loc[deaths_headlines_e.total_2020.dropna().last_valid_index(), 'Week ended']),
482 # 'excess_deaths': (deaths_headlines.loc[12:].total_2020 - deaths_headlines.loc[12:].previous_mean).sum()
485 # with open('excess_deaths.json', 'w') as f:
486 # json.dump(excess_death_data, f)
489 ```python Collapsed="false"
490 # excess_death_data = {
491 # 'start_date': str(eng_xls.loc[12, 'Week ended']),
492 # 'end_date': str(eng_xls.loc[27, 'Week ended']),
493 # 'excess_deaths': (deaths_headlines.loc[12:27].total_2020 - deaths_headlines.loc[12:27].previous_mean).sum()
496 # with open('excess_deaths.json', 'w') as f:
497 # json.dump(excess_death_data, f)
500 ```python Collapsed="false"
501 # excess_death_data = {
502 # 'start_date': str(raw_data_2020.loc[12].droplevel(1)['Week ended']),
503 # 'end_date': str(raw_data_2020.iloc[-1].droplevel(1)['Week ended']),
504 # 'excess_deaths': (deaths_headlines.loc[12:].total_2020 - deaths_headlines.loc[12:].previous_mean).sum()
507 # with open('excess_deaths.json', 'w') as f:
508 # json.dump(excess_death_data, f)
511 ```python Collapsed="false"
512 eng_xls['Week ended']
515 ```python Collapsed="false"
516 # raw_data_2020.droplevel(1, axis='columns')['Week ended']
519 ```python Collapsed="false"
# Attach the week-ending date from the E&W workbook and export for downstream
# use.  The week-number index is dropped (index=False) since week_ended
# identifies each row.
deaths_by_week = deaths_headlines.merge(eng_xls['Week ended'], left_index=True, right_index=True)
deaths_by_week.rename(columns={'Week ended': 'week_ended'}, inplace=True)
deaths_by_week.to_csv('deaths_by_week.csv', header=True, index=False)
525 ```python Collapsed="false"
526 # deaths_by_week = deaths_headlines.merge(raw_data_2020.droplevel(1, axis='columns')['Week ended'], left_index=True, right_index=True)
527 # deaths_by_week.rename(columns={'Week ended': 'week_ended'}, inplace=True)
528 # deaths_by_week.to_csv('deaths_by_week.csv', header=True, index=False)
531 <!-- #region Collapsed="false" -->
532 # Plots for UK nations
535 ```python Collapsed="false"
536 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
538 fig = plt.figure(figsize=(10, 10))
539 ax = fig.add_subplot(111, projection="polar")
543 np.arange(len(deaths_headlines_e))/float(len(deaths_headlines_e))*2.*np.pi),
545 l15, = ax.plot(theta, deaths_headlines_e['total_2015'], color="#e47d7d", label="2015") # 0
546 l16, = ax.plot(theta, deaths_headlines_e['total_2016'], color="#afc169", label="2016") # 72 , d0e47d
547 l17, = ax.plot(theta, deaths_headlines_e['total_2017'], color="#7de4a6", label="2017") # 144
548 l18, = ax.plot(theta, deaths_headlines_e['total_2018'], color="#7da6e4", label="2018") # 216
549 l19, = ax.plot(theta, deaths_headlines_e['total_2019'], color="#d07de4", label="2019") # 288
551 lmean, = ax.plot(theta, deaths_headlines_e['previous_mean'], color="black", linestyle='dashed', label="mean")
553 l20, = ax.plot(theta, deaths_headlines_e['total_2020'], color="red", label="2020")
555 # deaths_headlines.total_2019.plot(ax=ax)
557 def _closeline(line):
558 x, y = line.get_data()
559 x = np.concatenate((x, [x[0]]))
560 y = np.concatenate((y, [y[0]]))
563 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
567 ax.set_xticklabels(deaths_headlines_e.index)
569 plt.title("Deaths by week over years, England")
570 plt.savefig('deaths-radar_england.png')
574 ```python Collapsed="false"
575 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
577 fig = plt.figure(figsize=(10, 10))
578 ax = fig.add_subplot(111, projection="polar")
582 np.arange(len(deaths_headlines_w))/float(len(deaths_headlines_w))*2.*np.pi),
584 l15, = ax.plot(theta, deaths_headlines_w['total_2015'], color="#e47d7d", label="2015") # 0
585 l16, = ax.plot(theta, deaths_headlines_w['total_2016'], color="#afc169", label="2016") # 72 , d0e47d
586 l17, = ax.plot(theta, deaths_headlines_w['total_2017'], color="#7de4a6", label="2017") # 144
587 l18, = ax.plot(theta, deaths_headlines_w['total_2018'], color="#7da6e4", label="2018") # 216
588 l19, = ax.plot(theta, deaths_headlines_w['total_2019'], color="#d07de4", label="2019") # 288
590 lmean, = ax.plot(theta, deaths_headlines_w['previous_mean'], color="black", linestyle='dashed', label="mean")
592 l20, = ax.plot(theta, deaths_headlines_w['total_2020'], color="red", label="2020")
595 def _closeline(line):
596 x, y = line.get_data()
597 x = np.concatenate((x, [x[0]]))
598 y = np.concatenate((y, [y[0]]))
601 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
605 ax.set_xticklabels(deaths_headlines_w.index)
607 plt.title("Deaths by week over years, Wales")
608 plt.savefig('deaths-radar_wales.png')
612 ```python Collapsed="false"
613 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
615 fig = plt.figure(figsize=(10, 10))
616 ax = fig.add_subplot(111, projection="polar")
620 np.arange(len(deaths_headlines_s))/float(len(deaths_headlines_s))*2.*np.pi),
622 l15, = ax.plot(theta, deaths_headlines_s['total_2015'], color="#e47d7d", label="2015") # 0
623 l16, = ax.plot(theta, deaths_headlines_s['total_2016'], color="#afc169", label="2016") # 72 , d0e47d
624 l17, = ax.plot(theta, deaths_headlines_s['total_2017'], color="#7de4a6", label="2017") # 144
625 l18, = ax.plot(theta, deaths_headlines_s['total_2018'], color="#7da6e4", label="2018") # 216
626 l19, = ax.plot(theta, deaths_headlines_s['total_2019'], color="#d07de4", label="2019") # 288
628 lmean, = ax.plot(theta, deaths_headlines_s['previous_mean'], color="black", linestyle='dashed', label="mean")
630 l20, = ax.plot(theta, deaths_headlines_s['total_2020'], color="red", label="2020")
633 def _closeline(line):
634 x, y = line.get_data()
635 x = np.concatenate((x, [x[0]]))
636 y = np.concatenate((y, [y[0]]))
639 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
643 ax.set_xticklabels(deaths_headlines_s.index)
645 plt.title("Deaths by week over years, Scotland")
646 plt.savefig('deaths-radar_scotland.png')
650 ```python Collapsed="false"
651 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
653 fig = plt.figure(figsize=(10, 10))
654 ax = fig.add_subplot(111, projection="polar")
658 np.arange(len(deaths_headlines_i))/float(len(deaths_headlines_i))*2.*np.pi),
660 l15, = ax.plot(theta, deaths_headlines_i['total_2015'], color="#e47d7d", label="2015") # 0
661 l16, = ax.plot(theta, deaths_headlines_i['total_2016'], color="#afc169", label="2016") # 72 , d0e47d
662 l17, = ax.plot(theta, deaths_headlines_i['total_2017'], color="#7de4a6", label="2017") # 144
663 l18, = ax.plot(theta, deaths_headlines_i['total_2018'], color="#7da6e4", label="2018") # 216
664 l19, = ax.plot(theta, deaths_headlines_i['total_2019'], color="#d07de4", label="2019") # 288
666 lmean, = ax.plot(theta, deaths_headlines_i['previous_mean'], color="black", linestyle='dashed', label="mean")
668 l20, = ax.plot(theta, deaths_headlines_i['total_2020'], color="red", label="2020")
671 def _closeline(line):
672 x, y = line.get_data()
673 x = np.concatenate((x, [x[0]]))
674 y = np.concatenate((y, [y[0]]))
677 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
681 ax.set_xticklabels(deaths_headlines_i.index)
683 plt.title("Deaths by week over years, Northern Ireland")
684 plt.savefig('deaths-radar_northern_ireland.png')
688 ```python Collapsed="false"
689 # list(raw_data_2020.columns)
692 ```python Collapsed="false"
693 # deaths_headlines_e = raw_data_2020.iloc[:, [1]].copy()
694 # deaths_headlines_e.columns = ['total_2020']
695 # deaths_headlines_w = raw_data_2020['W92000004'].copy()
696 # deaths_headlines_e.columns = ['total_2020']
697 # deaths_headlines_w.columns = ['total_2020']
698 # deaths_headlines_e.total_2020 -= deaths_headlines_w.total_2020
699 # deaths_headlines_e.head()
703 ```python Collapsed="false"
707 ```python Collapsed="false"