9 jupytext_version: 1.11.1
11 display_name: Python 3
16 <!-- #region Collapsed="false" -->
19 * [Office for National Statistics](https://www.ons.gov.uk/peoplepopulationandcommunity/birthsdeathsandmarriages/deaths/datasets/weeklyprovisionalfiguresondeathsregisteredinenglandandwales) (England and Wales). Weeks start on a Saturday.
20 * [Northern Ireland Statistics and Research Agency](https://www.nisra.gov.uk/publications/weekly-deaths) (Northern Ireland). Weeks start on a Saturday. Note that the week numbers don't match the England and Wales data.
21 * [National Records of Scotland](https://www.nrscotland.gov.uk/statistics-and-data/statistics/statistics-by-theme/vital-events/general-publications/weekly-and-monthly-data-on-births-and-deaths/weekly-data-on-births-and-deaths) (Scotland). Note that Scotland uses ISO8601 week numbers, which start on a Monday.
25 ```python Collapsed="false"
31 from scipy.stats import gmean
34 import matplotlib as mpl
35 import matplotlib.pyplot as plt
38 from sqlalchemy.types import Integer, Text, String, DateTime, Float
39 from sqlalchemy import create_engine
43 ```python Collapsed="false"
44 connection_string = 'postgresql://covid:3NbjJTkT63@localhost/covid'  # NOTE(review): hard-coded DB password committed to source — move credentials to an environment variable or untracked config
47 ```python Collapsed="false"
48 %sql $connection_string
51 ```python Collapsed="false"
52 conn = create_engine(connection_string)
53 engine = create_engine(connection_string)
56 ```python Collapsed="false"
57 england_wales_filename = 'uk-deaths-data/publishedweek532020.xlsx'
62 drop table if exists all_causes_deaths;
63 create table all_causes_deaths (
69 CONSTRAINT week_nation PRIMARY KEY(year, week, nation)
72 with engine.connect() as connection:
73 connection.execute(query_string)
76 ```python Collapsed="false"
77 raw_data_2015 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2015.csv',
78 parse_dates=[1, 2], dayfirst=True,
82 dh15i = raw_data_2015.iloc[:, [0, 3]]
83 dh15i.set_index(dh15i.columns[0], inplace=True)
84 dh15i.columns = ['total_2015']
88 ```python Collapsed="false"
92 ```python Collapsed="false"
93 rd = raw_data_2015.iloc[:, [0, 2, 3]].droplevel(1, axis=1).rename(
94 columns={'Week Ends (Friday)': 'date_up_to', 'Total Number of Deaths Registered in Week (2015P)': 'deaths',
95 'Registration Week': 'week'}
98 rd['nation'] = 'Northern Ireland'
102 ```python Collapsed="false"
110 ```python Collapsed="false"
111 %sql select * from all_causes_deaths limit 10
114 ```python Collapsed="false"
115 raw_data_2016 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2016.csv',
116 parse_dates=[1, 2], dayfirst=True,
121 # dh16i = raw_data_2016.iloc[:, [2]]
122 # dh16i.columns = ['total_2016']
124 dh16i = raw_data_2016.iloc[:, [0, 3]]
125 dh16i.set_index(dh16i.columns[0], inplace=True)
126 dh16i.columns = ['total_2016']
130 ```python Collapsed="false"
131 rd = raw_data_2016.iloc[:, [0, 2, 3]].droplevel(1, axis=1).rename(
132 columns={'Week Ends (Friday)': 'date_up_to', 'Total Number of Deaths Registered in Week (2016P)': 'deaths',
133 'Registration Week': 'week'}
136 rd['nation'] = 'Northern Ireland'
140 ```python Collapsed="false"
148 ```python Collapsed="false"
149 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation)
152 ```python Collapsed="false"
153 raw_data_2017 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2017.csv',
154 parse_dates=[1, 2], dayfirst=True,
159 dh17i = raw_data_2017.iloc[:, [0, 3]]
160 dh17i.set_index(dh17i.columns[0], inplace=True)
161 dh17i.columns = ['total_2017']
165 ```python Collapsed="false"
166 rd = raw_data_2017.iloc[:, [0, 2, 3]].droplevel(1, axis=1).rename(
167 columns={'Week Ends (Friday)': 'date_up_to', 'Total Number of Deaths Registered in Week (2017P)': 'deaths',
168 'Registration Week': 'week'}
171 rd['nation'] = 'Northern Ireland'
175 ```python Collapsed="false"
183 ```python Collapsed="false"
184 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation)
187 ```python Collapsed="false"
188 raw_data_2018 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2018.csv',
189 parse_dates=[1, 2], dayfirst=True,
194 dh18i = raw_data_2018.iloc[:, [0, 3]]
195 dh18i.set_index(dh18i.columns[0], inplace=True)
196 dh18i.columns = ['total_2018']
200 ```python Collapsed="false"
201 rd = raw_data_2018.iloc[:, [0, 2, 3]].droplevel(1, axis=1).rename(
202 columns={'Week Ends (Friday)': 'date_up_to', 'Total Number of Deaths Registered in Week (2018P)': 'deaths',
203 'Registration Week': 'week'}
206 rd['nation'] = 'Northern Ireland'
210 ```python Collapsed="false"
218 ```python Collapsed="false"
219 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation)
222 ```python Collapsed="false"
223 raw_data_2019 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2019.csv',
224 parse_dates=[1, 2], dayfirst=True,
229 dh19i = raw_data_2019.iloc[:, [0, 3]]
230 dh19i.set_index(dh19i.columns[0], inplace=True)
231 dh19i.columns = ['total_2019']
235 ```python Collapsed="false"
236 rd = raw_data_2019.iloc[:, [0, 2, 3]].droplevel(1, axis=1).rename(
237 columns={'Week Ends (Friday)': 'date_up_to', 'Total Number of Deaths Registered in Week (2019P)': 'deaths',
238 'Registration Week': 'week'}
241 rd['nation'] = 'Northern Ireland'
245 ```python Collapsed="false"
253 ```python Collapsed="false"
254 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation)
257 ```python Collapsed="false"
258 raw_data_2020_i = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2020.csv',
259 parse_dates=[1], dayfirst=True,
262 raw_data_2020_i.head()
265 ```python Collapsed="false"
266 rd = raw_data_2020_i.iloc[:, [0, 1, 2]].droplevel(1, axis=1).rename(
267 columns={'Week Ending (Friday)': 'date_up_to', 'Total Number of Deaths Registered in Week (2020P)': 'deaths',
268 'Registration Week': 'week'}
271 rd['nation'] = 'Northern Ireland'
275 ```python Collapsed="false"
279 ```python Collapsed="false"
287 ```python Collapsed="false"
288 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by nation, year
292 raw_data_2020_i.set_index(raw_data_2020_i.columns[0], inplace=True)
293 raw_data_2020_i.tail()
296 ```python Collapsed="false"
297 datetime.datetime.now().isocalendar()
300 ```python Collapsed="false"
301 datetime.datetime.fromisocalendar(2021, 3, 1)
304 ```python Collapsed="false"
308 ```python Collapsed="false"
309 raw_data_s = pd.read_csv('uk-deaths-data/weekly-deaths-scotland.csv',
317 ```python Collapsed="false"
318 deaths_headlines_s = raw_data_s[reversed('2015 2016 2017 2018 2019 2020'.split())]
319 deaths_headlines_s.columns = ['total_' + c for c in deaths_headlines_s.columns]
320 deaths_headlines_s.reset_index(drop=True, inplace=True)
321 deaths_headlines_s.index = deaths_headlines_s.index + 1
326 %sql select * from all_causes_deaths limit 5
331 delete from all_causes_deaths where nation = 'Scotland';
333 with engine.connect() as connection:
334 connection.execute(query_string)
337 ```python Collapsed="false"
338 for year, ser in deaths_headlines_s.items():
339 year_i = int(year[-4:])
341 for week, deaths in ser.dropna().iteritems():
342 # print(datetime.date.fromisocalendar(year_i, week, 7), deaths)
343 dut = datetime.date.fromisocalendar(year_i, week, 7)
344 query_string = f'''insert into
345 all_causes_deaths(week, year, date_up_to, nation, deaths)
346 values ({week}, {year_i}, '{dut}', 'Scotland', {deaths});'''
347 with engine.connect() as connection:
348 connection.execute(query_string)
352 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by nation, year
356 %sql select year, nation, date_up_to from all_causes_deaths where week=3 order by year, nation
359 ```python Collapsed="false"
360 eng_xls = pd.read_excel(england_wales_filename,
361 sheet_name="Weekly figures 2020",
362 skiprows=[0, 1, 2, 3],
369 ```python Collapsed="false"
373 ```python Collapsed="false"
374 eng_xls_columns = list(eng_xls.columns)  # repair column names that were read in as NaN
376 for i, c in enumerate(eng_xls_columns):
377 # print(i, c, type(c), isinstance(c, float))
378 if isinstance(c, float) and np.isnan(c):  # blank header cell -> pandas names the column float('nan')
379 if eng_xls.iloc[0].iloc[i] is not pd.NaT:  # skip when the first data row has no usable value either
380 eng_xls_columns[i] = eng_xls.iloc[0].iloc[i]  # use the first data row's value as the column name
382 # np.isnan(eng_xls_columns[0])
385 eng_xls.columns = eng_xls_columns
394 rd = eng_xls.iloc[1:][['Week ended', 'Wales']].reset_index(level=0).rename(
395 columns={'Week ended': 'date_up_to', 'Wales': 'deaths',
399 rd['nation'] = 'Wales'
411 ```python Collapsed="false"
412 eng_xls = eng_xls.iloc[1:]
413 eng_xls['England deaths'] = eng_xls.loc[:, 'Total deaths, all ages'] - eng_xls.loc[:, 'Wales']
421 rd = eng_xls[['Week ended', 'England deaths']].reset_index(level=0).rename(
422 columns={'Week ended': 'date_up_to', 'England deaths': 'deaths',
426 rd['nation'] = 'England'
432 delete from all_causes_deaths where nation = 'England';
434 with engine.connect() as connection:
435 connection.execute(query_string)
451 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by nation, year
454 ```python Collapsed="false"
455 # raw_data_2020 = pd.read_csv('uk-deaths-data/publishedweek272020.csv',
456 # parse_dates=[1], dayfirst=True,
461 ```python Collapsed="false"
465 ```python Collapsed="false"
466 # raw_data_2020.head()
469 ```python Collapsed="false"
470 # raw_data_2020['W92000004', 'Wales']
473 ```python Collapsed="false"
474 raw_data_2019 = pd.read_csv('uk-deaths-data/publishedweek522019.csv',
475 parse_dates=[1], dayfirst=True,
478 # raw_data_2019.head()
482 rdew = raw_data_2019.iloc[:, [0, 1, 2, -1]].droplevel(axis=1, level=1)
487 rd = rdew.drop(columns=['Total deaths, all ages']).rename(
488 columns={'Week ended': 'date_up_to', 'W92000004': 'deaths',
489 'Week number': 'week'}
492 rd['nation'] = 'Wales'
505 rd = rdew.loc[:, ['Week ended','Week number']]
506 rd['deaths'] = rdew['Total deaths, all ages'] - rdew['W92000004']
508 columns={'Week ended': 'date_up_to', 'Week number': 'week'}
511 rd['nation'] = 'England'
524 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by nation, year
527 ```python Collapsed="false"
528 raw_data_2018 = pd.read_csv('uk-deaths-data/publishedweek522018.csv',
529 parse_dates=[1], dayfirst=True,
532 # raw_data_2018.head()
536 rdew = raw_data_2018.iloc[:, [0, 1, 2, -1]].droplevel(axis=1, level=1)
541 rd = rdew.drop(columns=['Total deaths, all ages']).rename(
542 columns={'Week ended': 'date_up_to', 'W92000004': 'deaths',
543 'Week number': 'week'}
546 rd['nation'] = 'Wales'
559 rd = rdew.loc[:, ['Week ended','Week number']]
560 rd['deaths'] = rdew['Total deaths, all ages'] - rdew['W92000004']
562 columns={'Week ended': 'date_up_to', 'Week number': 'week'}
565 rd['nation'] = 'England'
578 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by nation, year
581 ```python Collapsed="false"
582 raw_data_2017 = pd.read_csv('uk-deaths-data/publishedweek522017.csv',
583 parse_dates=[1], dayfirst=True,
586 # raw_data_2017.head()
590 rdew = raw_data_2017.iloc[:, [0, 1, 2, -1]].droplevel(axis=1, level=1)
595 rd = rdew.drop(columns=['Total deaths, all ages']).rename(
596 columns={'Week ended': 'date_up_to', 'W92000004': 'deaths',
597 'Week number': 'week'}
600 rd['nation'] = 'Wales'
613 rd = rdew.loc[:, ['Week ended','Week number']]
614 rd['deaths'] = rdew['Total deaths, all ages'] - rdew['W92000004']
616 columns={'Week ended': 'date_up_to', 'Week number': 'week'}
619 rd['nation'] = 'England'
632 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by nation, year
639 ```python Collapsed="false"
640 raw_data_2016 = pd.read_csv('uk-deaths-data/publishedweek522016.csv',
641 parse_dates=[1], dayfirst=True,
644 # raw_data_2016.head()
652 rdew = raw_data_2016.iloc[:, [0, 1, 2, -1]].droplevel(axis=1, level=1)
657 rd = rdew.drop(columns=['Total deaths, all ages']).rename(
658 columns={'Week ended': 'date_up_to', 'W92000004': 'deaths',
659 'Week number': 'week'}
662 rd['nation'] = 'Wales'
675 rd = rdew.loc[:, ['Week ended','Week number']]
676 rd['deaths'] = rdew['Total deaths, all ages'] - rdew['W92000004']
678 columns={'Week ended': 'date_up_to', 'Week number': 'week'}
681 rd['nation'] = 'England'
694 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by nation, year
697 ```python Collapsed="false"
698 raw_data_2015 = pd.read_csv('uk-deaths-data/publishedweek2015.csv',
699 parse_dates=[1], dayfirst=True,
702 # raw_data_2015.head()
706 rdew = raw_data_2015.iloc[:, [0, 1, 2, -1]].droplevel(axis=1, level=1)
711 rd = rdew.drop(columns=['Total deaths, all ages']).rename(
712 columns={'Week ended': 'date_up_to', 'W92000004': 'deaths',
713 'Week number': 'week'}
716 rd['nation'] = 'Wales'
729 rd = rdew.loc[:, ['Week ended','Week number']]
730 rd['deaths'] = rdew['Total deaths, all ages'] - rdew['W92000004']
732 columns={'Week ended': 'date_up_to', 'Week number': 'week'}
735 rd['nation'] = 'England'
748 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by year, nation
751 ```sql magic_args="res << select week, year, deaths"
752 from all_causes_deaths
753 where nation = 'England'
757 deaths_headlines_e = res.DataFrame().pivot(index='week', columns='year', values='deaths')
765 ```sql magic_args="res << select week, year, deaths"
766 from all_causes_deaths
767 where nation = 'Scotland'
771 deaths_headlines_s = res.DataFrame().pivot(index='week', columns='year', values='deaths')
775 ```sql magic_args="res << select week, year, deaths"
776 from all_causes_deaths
777 where nation = 'Wales'
781 deaths_headlines_w = res.DataFrame().pivot(index='week', columns='year', values='deaths')
785 ```sql magic_args="res << select week, year, deaths"
786 from all_causes_deaths
787 where nation = 'Northern Ireland'
791 deaths_headlines_i = res.DataFrame().pivot(index='week', columns='year', values='deaths')
795 ```python Collapsed="false"
796 deaths_headlines = deaths_headlines_e + deaths_headlines_w + deaths_headlines_i + deaths_headlines_s  # index-aligned sum on (week, year): a cell missing in any one nation makes the UK total NaN for that cell
801 deaths_headlines_e.columns
805 deaths_headlines_e['previous_mean'] = deaths_headlines_e[[int(y) for y in '2019 2018 2017 2016 2015'.split()]].apply(np.mean, axis=1)
806 deaths_headlines_w['previous_mean'] = deaths_headlines_w[[int(y) for y in '2019 2018 2017 2016 2015'.split()]].apply(np.mean, axis=1)
807 deaths_headlines_s['previous_mean'] = deaths_headlines_s[[int(y) for y in '2019 2018 2017 2016 2015'.split()]].apply(np.mean, axis=1)
808 deaths_headlines_i['previous_mean'] = deaths_headlines_i[[int(y) for y in '2019 2018 2017 2016 2015'.split()]].apply(np.mean, axis=1)
809 deaths_headlines['previous_mean'] = deaths_headlines[[int(y) for y in '2019 2018 2017 2016 2015'.split()]].apply(np.mean, axis=1)
813 ```python Collapsed="false"
814 deaths_headlines[[2020, 2019, 2018, 2017, 2016, 2015]].plot(figsize=(14, 8))
817 ```python Collapsed="false"
818 deaths_headlines[[2020, 'previous_mean']].plot(figsize=(10, 8))
821 ```python Collapsed="false"
822 deaths_headlines_i.plot()
826 deaths_headlines[2020].sum() - deaths_headlines.previous_mean.sum()
829 ```python Collapsed="false"
830 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
832 dhna = deaths_headlines.dropna()
834 fig = plt.figure(figsize=(10, 10))
835 ax = fig.add_subplot(111, projection="polar")
839 np.arange(len(dhna))/float(len(dhna))*2.*np.pi),
841 # l15, = ax.plot(theta, deaths_headlines['total_2015'], color="#b56363", label="2015") # 0
842 # l16, = ax.plot(theta, deaths_headlines['total_2016'], color="#a4b563", label="2016") # 72
843 # l17, = ax.plot(theta, deaths_headlines['total_2017'], color="#63b584", label="2017") # 144
844 # l18, = ax.plot(theta, deaths_headlines['total_2018'], color="#6384b5", label="2018") # 216
845 # l19, = ax.plot(theta, deaths_headlines['total_2019'], color="#a4635b", label="2019") # 288
846 l15, = ax.plot(theta, dhna[2015], color="#e47d7d", label="2015") # 0
847 l16, = ax.plot(theta, dhna[2016], color="#afc169", label="2016") # 72 , d0e47d
848 l17, = ax.plot(theta, dhna[2017], color="#7de4a6", label="2017") # 144
849 l18, = ax.plot(theta, dhna[2018], color="#7da6e4", label="2018") # 216
850 l19, = ax.plot(theta, dhna[2019], color="#d07de4", label="2019") # 288
852 lmean, = ax.plot(theta, dhna['previous_mean'], color="black", linestyle='dashed', label="mean")
854 l20, = ax.plot(theta, dhna[2020], color="red", label="2020")
856 # deaths_headlines.total_2019.plot(ax=ax)
858 def _closeline(line):
859 x, y = line.get_data()
860 x = np.concatenate((x, [x[0]]))
861 y = np.concatenate((y, [y[0]]))
864 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
868 ax.set_xticklabels(dhna.index)
870 plt.title("Deaths by week over years, all UK")
871 plt.savefig('deaths-radar-2020.png')
875 <!-- #region Collapsed="false" -->
876 # Plots for UK nations
879 ```python Collapsed="false"
880 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
882 fig = plt.figure(figsize=(10, 10))
883 ax = fig.add_subplot(111, projection="polar")
887 np.arange(len(deaths_headlines_e))/float(len(deaths_headlines_e))*2.*np.pi),
889 l15, = ax.plot(theta, deaths_headlines_e[2015], color="#e47d7d", label="2015") # 0
890 l16, = ax.plot(theta, deaths_headlines_e[2016], color="#afc169", label="2016") # 72 , d0e47d
891 l17, = ax.plot(theta, deaths_headlines_e[2017], color="#7de4a6", label="2017") # 144
892 l18, = ax.plot(theta, deaths_headlines_e[2018], color="#7da6e4", label="2018") # 216
893 l19, = ax.plot(theta, deaths_headlines_e[2019], color="#d07de4", label="2019") # 288
895 lmean, = ax.plot(theta, deaths_headlines_e['previous_mean'], color="black", linestyle='dashed', label="mean")
897 l20, = ax.plot(theta, deaths_headlines_e[2020], color="red", label="2020")
899 # deaths_headlines.total_2019.plot(ax=ax)
901 def _closeline(line):
902 x, y = line.get_data()
903 x = np.concatenate((x, [x[0]]))
904 y = np.concatenate((y, [y[0]]))
907 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
911 ax.set_xticklabels(deaths_headlines_e.index)
913 plt.title("Deaths by week over years, England")
914 plt.savefig('deaths-radar-2020-england.png')
918 ```python Collapsed="false"
919 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
921 fig = plt.figure(figsize=(10, 10))
922 ax = fig.add_subplot(111, projection="polar")
926 np.arange(len(deaths_headlines_w))/float(len(deaths_headlines_w))*2.*np.pi),
928 l15, = ax.plot(theta, deaths_headlines_w[2015], color="#e47d7d", label="2015") # 0
929 l16, = ax.plot(theta, deaths_headlines_w[2016], color="#afc169", label="2016") # 72 , d0e47d
930 l17, = ax.plot(theta, deaths_headlines_w[2017], color="#7de4a6", label="2017") # 144
931 l18, = ax.plot(theta, deaths_headlines_w[2018], color="#7da6e4", label="2018") # 216
932 l19, = ax.plot(theta, deaths_headlines_w[2019], color="#d07de4", label="2019") # 288
934 lmean, = ax.plot(theta, deaths_headlines_w['previous_mean'], color="black", linestyle='dashed', label="mean")
936 l20, = ax.plot(theta, deaths_headlines_w[2020], color="red", label="2020")
939 def _closeline(line):
940 x, y = line.get_data()
941 x = np.concatenate((x, [x[0]]))
942 y = np.concatenate((y, [y[0]]))
945 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
949 ax.set_xticklabels(deaths_headlines_w.index)
951 plt.title("Deaths by week over years, Wales")
952 plt.savefig('deaths-radar-2020-wales.png')
956 ```python Collapsed="false"
957 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
959 fig = plt.figure(figsize=(10, 10))
960 ax = fig.add_subplot(111, projection="polar")
964 np.arange(len(deaths_headlines_s))/float(len(deaths_headlines_s))*2.*np.pi),
966 l15, = ax.plot(theta, deaths_headlines_s[2015], color="#e47d7d", label="2015") # 0
967 l16, = ax.plot(theta, deaths_headlines_s[2016], color="#afc169", label="2016") # 72 , d0e47d
968 l17, = ax.plot(theta, deaths_headlines_s[2017], color="#7de4a6", label="2017") # 144
969 l18, = ax.plot(theta, deaths_headlines_s[2018], color="#7da6e4", label="2018") # 216
970 l19, = ax.plot(theta, deaths_headlines_s[2019], color="#d07de4", label="2019") # 288
972 lmean, = ax.plot(theta, deaths_headlines_s['previous_mean'], color="black", linestyle='dashed', label="mean")
974 l20, = ax.plot(theta, deaths_headlines_s[2020], color="red", label="2020")
977 def _closeline(line):
978 x, y = line.get_data()
979 x = np.concatenate((x, [x[0]]))
980 y = np.concatenate((y, [y[0]]))
983 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
987 ax.set_xticklabels(deaths_headlines_s.index)
989 plt.title("Deaths by week over years, Scotland")
990 plt.savefig('deaths-radar-2020-scotland.png')
994 ```python Collapsed="false"
995 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
997 fig = plt.figure(figsize=(10, 10))
998 ax = fig.add_subplot(111, projection="polar")
1002 np.arange(len(deaths_headlines_i))/float(len(deaths_headlines_i))*2.*np.pi),
1004 l15, = ax.plot(theta, deaths_headlines_i[2015], color="#e47d7d", label="2015") # 0
1005 l16, = ax.plot(theta, deaths_headlines_i[2016], color="#afc169", label="2016") # 72 , d0e47d
1006 l17, = ax.plot(theta, deaths_headlines_i[2017], color="#7de4a6", label="2017") # 144
1007 l18, = ax.plot(theta, deaths_headlines_i[2018], color="#7da6e4", label="2018") # 216
1008 l19, = ax.plot(theta, deaths_headlines_i[2019], color="#d07de4", label="2019") # 288
1010 lmean, = ax.plot(theta, deaths_headlines_i['previous_mean'], color="black", linestyle='dashed', label="mean")
1012 l20, = ax.plot(theta, deaths_headlines_i[2020], color="red", label="2020")
1015 def _closeline(line):
1016 x, y = line.get_data()
1017 x = np.concatenate((x, [x[0]]))
1018 y = np.concatenate((y, [y[0]]))
1021 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
1024 ax.set_xticks(theta)
1025 ax.set_xticklabels(deaths_headlines_i.index)
1027 plt.title("Deaths by week over years, Northern Ireland")
1028 plt.savefig('deaths-radar-2020-northern-ireland.png')
1032 ```python Collapsed="false"
1036 ```python Collapsed="false"