9 jupytext_version: 1.9.1
11 display_name: Python 3
16 <!-- #region Collapsed="false" -->
19 * [Office for National Statistics](https://www.ons.gov.uk/peoplepopulationandcommunity/birthsdeathsandmarriages/deaths/datasets/weeklyprovisionalfiguresondeathsregisteredinenglandandwales) (England and Wales). Weeks start on a Saturday.
20 * [Northern Ireland Statistics and Research Agency](https://www.nisra.gov.uk/publications/weekly-deaths) (Northern Ireland). Weeks start on a Saturday. Note that the week numbers don't match the England and Wales data.
21 * [National Records of Scotland](https://www.nrscotland.gov.uk/statistics-and-data/statistics/statistics-by-theme/vital-events/general-publications/weekly-and-monthly-data-on-births-and-deaths/weekly-data-on-births-and-deaths) (Scotland). Note that Scotland uses ISO8601 week numbers, which start on a Monday.
25 ```python Collapsed="false"
31 from scipy.stats import gmean
34 import matplotlib as mpl
35 import matplotlib.pyplot as plt
38 from sqlalchemy.types import Integer, Text, String, DateTime, Float
39 from sqlalchemy import create_engine
43 ```python Collapsed="false"
44 connection_string = 'postgresql://covid:3NbjJTkT63@localhost/covid'
47 ```python Collapsed="false"
48 %sql $connection_string
51 ```python Collapsed="false"
52 conn = create_engine(connection_string)
55 ```python Collapsed="false"
56 england_wales_filename = 'uk-deaths-data/publishedweek532020.xlsx'
59 ```sql Collapsed="false"
60 drop table if exists all_causes_deaths;
61 create table all_causes_deaths (
67 CONSTRAINT week_nation PRIMARY KEY(year, week, nation)
71 ```python Collapsed="false"
72 raw_data_2015 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2015.csv',
73 parse_dates=[1, 2], dayfirst=True,
77 dh15i = raw_data_2015.iloc[:, [0, 3]]
78 dh15i.set_index(dh15i.columns[0], inplace=True)
79 dh15i.columns = ['total_2015']
83 ```python Collapsed="false"
87 ```python Collapsed="false"
88 rd = raw_data_2015.iloc[:, [0, 2, 3]].droplevel(1, axis=1).rename(
89 columns={'Week Ends (Friday)': 'date_up_to', 'Total Number of Deaths Registered in Week (2015P)': 'deaths',
90 'Registration Week': 'week'}
93 rd['nation'] = 'Northern Ireland'
97 ```python Collapsed="false"
105 ```python Collapsed="false"
106 %sql select * from all_causes_deaths limit 10
109 ```python Collapsed="false"
110 raw_data_2016 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2016.csv',
111 parse_dates=[1, 2], dayfirst=True,
116 # dh16i = raw_data_2016.iloc[:, [2]]
117 # dh16i.columns = ['total_2016']
119 dh16i = raw_data_2016.iloc[:, [0, 3]]
120 dh16i.set_index(dh16i.columns[0], inplace=True)
121 dh16i.columns = ['total_2016']
125 ```python Collapsed="false"
126 rd = raw_data_2016.iloc[:, [0, 2, 3]].droplevel(1, axis=1).rename(
127 columns={'Week Ends (Friday)': 'date_up_to', 'Total Number of Deaths Registered in Week (2016P)': 'deaths',
128 'Registration Week': 'week'}
131 rd['nation'] = 'Northern Ireland'
135 ```python Collapsed="false"
143 ```python Collapsed="false"
144 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation)
147 ```python Collapsed="false"
148 raw_data_2017 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2017.csv',
149 parse_dates=[1, 2], dayfirst=True,
154 dh17i = raw_data_2017.iloc[:, [0, 3]]
155 dh17i.set_index(dh17i.columns[0], inplace=True)
156 dh17i.columns = ['total_2017']
160 ```python Collapsed="false"
161 rd = raw_data_2017.iloc[:, [0, 2, 3]].droplevel(1, axis=1).rename(
162 columns={'Week Ends (Friday)': 'date_up_to', 'Total Number of Deaths Registered in Week (2017P)': 'deaths',
163 'Registration Week': 'week'}
166 rd['nation'] = 'Northern Ireland'
170 ```python Collapsed="false"
178 ```python Collapsed="false"
179 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation)
182 ```python Collapsed="false"
183 raw_data_2018 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2018.csv',
184 parse_dates=[1, 2], dayfirst=True,
189 dh18i = raw_data_2018.iloc[:, [0, 3]]
190 dh18i.set_index(dh18i.columns[0], inplace=True)
191 dh18i.columns = ['total_2018']
195 ```python Collapsed="false"
196 rd = raw_data_2018.iloc[:, [0, 2, 3]].droplevel(1, axis=1).rename(
197 columns={'Week Ends (Friday)': 'date_up_to', 'Total Number of Deaths Registered in Week (2018P)': 'deaths',
198 'Registration Week': 'week'}
201 rd['nation'] = 'Northern Ireland'
205 ```python Collapsed="false"
213 ```python Collapsed="false"
214 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation)
217 ```python Collapsed="false"
218 raw_data_2019 = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2019.csv',
219 parse_dates=[1, 2], dayfirst=True,
224 dh19i = raw_data_2019.iloc[:, [0, 3]]
225 dh19i.set_index(dh19i.columns[0], inplace=True)
226 dh19i.columns = ['total_2019']
230 ```python Collapsed="false"
231 rd = raw_data_2019.iloc[:, [0, 2, 3]].droplevel(1, axis=1).rename(
232 columns={'Week Ends (Friday)': 'date_up_to', 'Total Number of Deaths Registered in Week (2019P)': 'deaths',
233 'Registration Week': 'week'}
236 rd['nation'] = 'Northern Ireland'
240 ```python Collapsed="false"
248 ```python Collapsed="false"
249 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation)
252 ```python Collapsed="false"
253 raw_data_2020_i = pd.read_csv('uk-deaths-data/Weekly_Deaths_NI_2020.csv',
254 parse_dates=[1], dayfirst=True,
257 raw_data_2020_i.head()
260 ```python Collapsed="false"
261 rd = raw_data_2020_i.iloc[:, [0, 1, 2]].droplevel(1, axis=1).rename(
262 columns={'Week Ending (Friday)': 'date_up_to', 'Total Number of Deaths Registered in Week (2020P)': 'deaths',
263 'Registration Week': 'week'}
266 rd['nation'] = 'Northern Ireland'
270 ```python Collapsed="false"
274 ```python Collapsed="false"
282 ```python Collapsed="false"
283 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by nation, year
287 raw_data_2020_i.set_index(raw_data_2020_i.columns[0], inplace=True)
288 raw_data_2020_i.tail()
291 ```python Collapsed="false"
292 datetime.datetime.now().isocalendar()
295 ```python Collapsed="false"
296 datetime.datetime.fromisocalendar(2021, 3, 1)
299 ```python Collapsed="false"
303 ```python Collapsed="false"
304 raw_data_s = pd.read_csv('uk-deaths-data/weekly-deaths-scotland.csv',
312 ```python Collapsed="false"
# Build a week-indexed table of Scottish deaths, one column per year.
# Note: column selection needs a list-like, not a bare `reversed` iterator,
# so materialise it with list() before indexing.
deaths_headlines_s = raw_data_s[list(reversed('2015 2016 2017 2018 2019 2020'.split()))]
deaths_headlines_s.columns = ['total_' + c for c in deaths_headlines_s.columns]
deaths_headlines_s.reset_index(drop=True, inplace=True)
# Registration weeks are 1-based, so shift the default 0-based index up by one.
deaths_headlines_s.index = deaths_headlines_s.index + 1
321 %sql select * from all_causes_deaths limit 5
324 ```python Collapsed="false"
325 for year, ser in deaths_headlines_s.items():
326 year_i = int(year[-4:])
328 for week, deaths in ser.dropna().iteritems():
329 # print(datetime.date.fromisocalendar(year_i, week, 7), deaths)
330 dut = datetime.date.fromisocalendar(year_i, week, 7)
331 %sql insert into all_causes_deaths(week, year, date_up_to, nation, deaths) values ({week}, {year_i}, :dut, 'Scotland', {deaths})
335 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by nation, year
339 %sql select year, nation, date_up_to from all_causes_deaths where week=3 order by year, nation
342 ```python Collapsed="false"
343 eng_xls = pd.read_excel(england_wales_filename,
344 sheet_name="Weekly figures 2020",
345 skiprows=[0, 1, 2, 3],
352 ```python Collapsed="false"
356 ```python Collapsed="false"
357 eng_xls_columns = list(eng_xls.columns)
# The ONS spreadsheet uses merged header cells, so some column names arrive
# as NaN with the real name sitting in the first data row.  Promote those
# first-row values into the header, skipping empty (NaT) cells.
for i, c in enumerate(eng_xls_columns):
    if isinstance(c, float) and np.isnan(c):
        replacement = eng_xls.iloc[0].iloc[i]
        if replacement is not pd.NaT:
            eng_xls_columns[i] = replacement
365 # np.isnan(eng_xls_columns[0])
368 eng_xls.columns = eng_xls_columns
377 rd = eng_xls.iloc[1:][['Week ended', 'Wales']].reset_index(level=0).rename(
378 columns={'Week ended': 'date_up_to', 'Wales': 'deaths',
382 rd['nation'] = 'Wales'
394 ```python Collapsed="false"
# Drop the header/units row, taking an explicit copy so the new-column
# assignment below writes to a real frame rather than a slice view
# (avoids SettingWithCopyWarning and potential silent no-ops).
eng_xls = eng_xls.iloc[1:].copy()
# England-only deaths = England+Wales total minus the Wales column.
eng_xls['England deaths'] = eng_xls.loc[:, 'Total deaths, all ages'] - eng_xls.loc[:, 'Wales']
404 rd = eng_xls[['Week ended', 'England deaths']].reset_index(level=0).rename(
405 columns={'Week ended': 'date_up_to', 'England deaths': 'deaths',
409 rd['nation'] = 'England'
414 %sql delete from all_causes_deaths where nation = 'England'
430 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by nation, year
433 ```python Collapsed="false"
434 # raw_data_2020 = pd.read_csv('uk-deaths-data/publishedweek272020.csv',
435 # parse_dates=[1], dayfirst=True,
440 ```python Collapsed="false"
444 ```python Collapsed="false"
445 # raw_data_2020.head()
448 ```python Collapsed="false"
449 # raw_data_2020['W92000004', 'Wales']
452 ```python Collapsed="false"
453 raw_data_2019 = pd.read_csv('uk-deaths-data/publishedweek522019.csv',
454 parse_dates=[1], dayfirst=True,
457 # raw_data_2019.head()
461 rdew = raw_data_2019.iloc[:, [0, 1, 2, -1]].droplevel(axis=1, level=1)
466 rd = rdew.drop(columns=['Total deaths, all ages']).rename(
467 columns={'Week ended': 'date_up_to', 'W92000004': 'deaths',
468 'Week number': 'week'}
471 rd['nation'] = 'Wales'
484 rd = rdew.loc[:, ['Week ended','Week number']]
485 rd['deaths'] = rdew['Total deaths, all ages'] - rdew['W92000004']
487 columns={'Week ended': 'date_up_to', 'Week number': 'week'}
490 rd['nation'] = 'England'
503 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by nation, year
506 ```python Collapsed="false"
507 raw_data_2018 = pd.read_csv('uk-deaths-data/publishedweek522018.csv',
508 parse_dates=[1], dayfirst=True,
511 # raw_data_2018.head()
515 rdew = raw_data_2018.iloc[:, [0, 1, 2, -1]].droplevel(axis=1, level=1)
520 rd = rdew.drop(columns=['Total deaths, all ages']).rename(
521 columns={'Week ended': 'date_up_to', 'W92000004': 'deaths',
522 'Week number': 'week'}
525 rd['nation'] = 'Wales'
538 rd = rdew.loc[:, ['Week ended','Week number']]
539 rd['deaths'] = rdew['Total deaths, all ages'] - rdew['W92000004']
541 columns={'Week ended': 'date_up_to', 'Week number': 'week'}
544 rd['nation'] = 'England'
557 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by nation, year
560 ```python Collapsed="false"
561 raw_data_2017 = pd.read_csv('uk-deaths-data/publishedweek522017.csv',
562 parse_dates=[1], dayfirst=True,
565 # raw_data_2017.head()
569 rdew = raw_data_2017.iloc[:, [0, 1, 2, -1]].droplevel(axis=1, level=1)
574 rd = rdew.drop(columns=['Total deaths, all ages']).rename(
575 columns={'Week ended': 'date_up_to', 'W92000004': 'deaths',
576 'Week number': 'week'}
579 rd['nation'] = 'Wales'
592 rd = rdew.loc[:, ['Week ended','Week number']]
593 rd['deaths'] = rdew['Total deaths, all ages'] - rdew['W92000004']
595 columns={'Week ended': 'date_up_to', 'Week number': 'week'}
598 rd['nation'] = 'England'
611 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by nation, year
618 ```python Collapsed="false"
619 raw_data_2016 = pd.read_csv('uk-deaths-data/publishedweek522016.csv',
620 parse_dates=[1], dayfirst=True,
623 # raw_data_2016.head()
631 rdew = raw_data_2016.iloc[:, [0, 1, 2, -1]].droplevel(axis=1, level=1)
636 rd = rdew.drop(columns=['Total deaths, all ages']).rename(
637 columns={'Week ended': 'date_up_to', 'W92000004': 'deaths',
638 'Week number': 'week'}
641 rd['nation'] = 'Wales'
654 rd = rdew.loc[:, ['Week ended','Week number']]
655 rd['deaths'] = rdew['Total deaths, all ages'] - rdew['W92000004']
657 columns={'Week ended': 'date_up_to', 'Week number': 'week'}
660 rd['nation'] = 'England'
673 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by nation, year
676 ```python Collapsed="false"
677 raw_data_2015 = pd.read_csv('uk-deaths-data/publishedweek2015.csv',
678 parse_dates=[1], dayfirst=True,
681 # raw_data_2015.head()
685 rdew = raw_data_2015.iloc[:, [0, 1, 2, -1]].droplevel(axis=1, level=1)
690 rd = rdew.drop(columns=['Total deaths, all ages']).rename(
691 columns={'Week ended': 'date_up_to', 'W92000004': 'deaths',
692 'Week number': 'week'}
695 rd['nation'] = 'Wales'
708 rd = rdew.loc[:, ['Week ended','Week number']]
709 rd['deaths'] = rdew['Total deaths, all ages'] - rdew['W92000004']
711 columns={'Week ended': 'date_up_to', 'Week number': 'week'}
714 rd['nation'] = 'England'
727 %sql select year, nation, count(date_up_to) from all_causes_deaths group by (year, nation) order by year, nation
730 ```sql magic_args="res << select week, year, deaths"
731 from all_causes_deaths
732 where nation = 'England'
736 deaths_headlines_e = res.DataFrame().pivot(index='week', columns='year', values='deaths')
744 ```sql magic_args="res << select week, year, deaths"
745 from all_causes_deaths
746 where nation = 'Scotland'
750 deaths_headlines_s = res.DataFrame().pivot(index='week', columns='year', values='deaths')
754 ```sql magic_args="res << select week, year, deaths"
755 from all_causes_deaths
756 where nation = 'Wales'
760 deaths_headlines_w = res.DataFrame().pivot(index='week', columns='year', values='deaths')
764 ```sql magic_args="res << select week, year, deaths"
765 from all_causes_deaths
766 where nation = 'Northern Ireland'
770 deaths_headlines_i = res.DataFrame().pivot(index='week', columns='year', values='deaths')
774 ```python Collapsed="false"
775 deaths_headlines = deaths_headlines_e + deaths_headlines_w + deaths_headlines_i + deaths_headlines_s
780 deaths_headlines_e.columns
# Baseline for excess-deaths comparison: mean weekly deaths over the five
# pre-pandemic years, computed per nation and for the UK total.
# DataFrame.mean(axis=1) is the vectorized (and NaN-skipping) equivalent of
# the slower per-row apply(np.mean, axis=1).
baseline_years = [int(y) for y in '2019 2018 2017 2016 2015'.split()]
for frame in (deaths_headlines_e, deaths_headlines_w, deaths_headlines_s,
              deaths_headlines_i, deaths_headlines):
    frame['previous_mean'] = frame[baseline_years].mean(axis=1)
792 ```python Collapsed="false"
793 deaths_headlines[[2020, 2019, 2018, 2017, 2016, 2015]].plot(figsize=(14, 8))
796 ```python Collapsed="false"
797 deaths_headlines[[2020, 'previous_mean']].plot(figsize=(10, 8))
800 ```python Collapsed="false"
801 deaths_headlines_i.plot()
805 deaths_headlines[2020].sum() - deaths_headlines.previous_mean.sum()
808 ```python Collapsed="false"
809 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
811 dhna = deaths_headlines.dropna()
813 fig = plt.figure(figsize=(10, 10))
814 ax = fig.add_subplot(111, projection="polar")
818 np.arange(len(dhna))/float(len(dhna))*2.*np.pi),
820 # l15, = ax.plot(theta, deaths_headlines['total_2015'], color="#b56363", label="2015") # 0
821 # l16, = ax.plot(theta, deaths_headlines['total_2016'], color="#a4b563", label="2016") # 72
822 # l17, = ax.plot(theta, deaths_headlines['total_2017'], color="#63b584", label="2017") # 144
823 # l18, = ax.plot(theta, deaths_headlines['total_2018'], color="#6384b5", label="2018") # 216
824 # l19, = ax.plot(theta, deaths_headlines['total_2019'], color="#a4635b", label="2019") # 288
825 l15, = ax.plot(theta, dhna[2015], color="#e47d7d", label="2015") # 0
826 l16, = ax.plot(theta, dhna[2016], color="#afc169", label="2016") # 72 , d0e47d
827 l17, = ax.plot(theta, dhna[2017], color="#7de4a6", label="2017") # 144
828 l18, = ax.plot(theta, dhna[2018], color="#7da6e4", label="2018") # 216
829 l19, = ax.plot(theta, dhna[2019], color="#d07de4", label="2019") # 288
831 lmean, = ax.plot(theta, dhna['previous_mean'], color="black", linestyle='dashed', label="mean")
833 l20, = ax.plot(theta, dhna[2020], color="red", label="2020")
835 # deaths_headlines.total_2019.plot(ax=ax)
837 def _closeline(line):
838 x, y = line.get_data()
839 x = np.concatenate((x, [x[0]]))
840 y = np.concatenate((y, [y[0]]))
843 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
847 ax.set_xticklabels(dhna.index)
849 plt.title("Deaths by week over years, all UK")
850 plt.savefig('deaths-radar-2020.png')
854 <!-- #region Collapsed="false" -->
855 # Plots for UK nations
858 ```python Collapsed="false"
859 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
861 fig = plt.figure(figsize=(10, 10))
862 ax = fig.add_subplot(111, projection="polar")
866 np.arange(len(deaths_headlines_e))/float(len(deaths_headlines_e))*2.*np.pi),
868 l15, = ax.plot(theta, deaths_headlines_e[2015], color="#e47d7d", label="2015") # 0
869 l16, = ax.plot(theta, deaths_headlines_e[2016], color="#afc169", label="2016") # 72 , d0e47d
870 l17, = ax.plot(theta, deaths_headlines_e[2017], color="#7de4a6", label="2017") # 144
871 l18, = ax.plot(theta, deaths_headlines_e[2018], color="#7da6e4", label="2018") # 216
872 l19, = ax.plot(theta, deaths_headlines_e[2019], color="#d07de4", label="2019") # 288
874 lmean, = ax.plot(theta, deaths_headlines_e['previous_mean'], color="black", linestyle='dashed', label="mean")
876 l20, = ax.plot(theta, deaths_headlines_e[2020], color="red", label="2020")
878 # deaths_headlines.total_2019.plot(ax=ax)
880 def _closeline(line):
881 x, y = line.get_data()
882 x = np.concatenate((x, [x[0]]))
883 y = np.concatenate((y, [y[0]]))
886 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
890 ax.set_xticklabels(deaths_headlines_e.index)
892 plt.title("Deaths by week over years, England")
893 plt.savefig('deaths-radar-2020-england.png')
897 ```python Collapsed="false"
898 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
900 fig = plt.figure(figsize=(10, 10))
901 ax = fig.add_subplot(111, projection="polar")
905 np.arange(len(deaths_headlines_w))/float(len(deaths_headlines_w))*2.*np.pi),
907 l15, = ax.plot(theta, deaths_headlines_w[2015], color="#e47d7d", label="2015") # 0
908 l16, = ax.plot(theta, deaths_headlines_w[2016], color="#afc169", label="2016") # 72 , d0e47d
909 l17, = ax.plot(theta, deaths_headlines_w[2017], color="#7de4a6", label="2017") # 144
910 l18, = ax.plot(theta, deaths_headlines_w[2018], color="#7da6e4", label="2018") # 216
911 l19, = ax.plot(theta, deaths_headlines_w[2019], color="#d07de4", label="2019") # 288
913 lmean, = ax.plot(theta, deaths_headlines_w['previous_mean'], color="black", linestyle='dashed', label="mean")
915 l20, = ax.plot(theta, deaths_headlines_w[2020], color="red", label="2020")
918 def _closeline(line):
919 x, y = line.get_data()
920 x = np.concatenate((x, [x[0]]))
921 y = np.concatenate((y, [y[0]]))
924 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
928 ax.set_xticklabels(deaths_headlines_w.index)
930 plt.title("Deaths by week over years, Wales")
931 plt.savefig('deaths-radar-2020-wales.png')
935 ```python Collapsed="false"
936 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
938 fig = plt.figure(figsize=(10, 10))
939 ax = fig.add_subplot(111, projection="polar")
943 np.arange(len(deaths_headlines_s))/float(len(deaths_headlines_s))*2.*np.pi),
945 l15, = ax.plot(theta, deaths_headlines_s[2015], color="#e47d7d", label="2015") # 0
946 l16, = ax.plot(theta, deaths_headlines_s[2016], color="#afc169", label="2016") # 72 , d0e47d
947 l17, = ax.plot(theta, deaths_headlines_s[2017], color="#7de4a6", label="2017") # 144
948 l18, = ax.plot(theta, deaths_headlines_s[2018], color="#7da6e4", label="2018") # 216
949 l19, = ax.plot(theta, deaths_headlines_s[2019], color="#d07de4", label="2019") # 288
951 lmean, = ax.plot(theta, deaths_headlines_s['previous_mean'], color="black", linestyle='dashed', label="mean")
953 l20, = ax.plot(theta, deaths_headlines_s[2020], color="red", label="2020")
956 def _closeline(line):
957 x, y = line.get_data()
958 x = np.concatenate((x, [x[0]]))
959 y = np.concatenate((y, [y[0]]))
962 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
966 ax.set_xticklabels(deaths_headlines_s.index)
968 plt.title("Deaths by week over years, Scotland")
969 plt.savefig('deaths-radar-2020-scotland.png')
973 ```python Collapsed="false"
974 # Radar plot code taken from example at https://stackoverflow.com/questions/42878485/getting-matplotlib-radar-plot-with-pandas#
976 fig = plt.figure(figsize=(10, 10))
977 ax = fig.add_subplot(111, projection="polar")
981 np.arange(len(deaths_headlines_i))/float(len(deaths_headlines_i))*2.*np.pi),
983 l15, = ax.plot(theta, deaths_headlines_i[2015], color="#e47d7d", label="2015") # 0
984 l16, = ax.plot(theta, deaths_headlines_i[2016], color="#afc169", label="2016") # 72 , d0e47d
985 l17, = ax.plot(theta, deaths_headlines_i[2017], color="#7de4a6", label="2017") # 144
986 l18, = ax.plot(theta, deaths_headlines_i[2018], color="#7da6e4", label="2018") # 216
987 l19, = ax.plot(theta, deaths_headlines_i[2019], color="#d07de4", label="2019") # 288
989 lmean, = ax.plot(theta, deaths_headlines_i['previous_mean'], color="black", linestyle='dashed', label="mean")
991 l20, = ax.plot(theta, deaths_headlines_i[2020], color="red", label="2020")
994 def _closeline(line):
995 x, y = line.get_data()
996 x = np.concatenate((x, [x[0]]))
997 y = np.concatenate((y, [y[0]]))
1000 [_closeline(l) for l in [l19, l18, l17, l16, l15, lmean]]
1003 ax.set_xticks(theta)
1004 ax.set_xticklabels(deaths_headlines_i.index)
1006 plt.title("Deaths by week over years, Northern Ireland")
1007 plt.savefig('deaths-radar-2020-northern-ireland.png')
1011 ```python Collapsed="false"
1015 ```python Collapsed="false"