kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
171,635
# Fit a growth curve per (Province_State, Country_Region) group and write the
# predictions into a filled copy of the test set.
start_date = '01/22/2020'
start_pred = 67   # day index where prediction starts
start_submit = 57  # day index where the submission window starts
test_date_list = test_data.iloc[:, 3].unique()
test_data_filled = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/test.csv')
test_data_filled = test_data_filled.fillna(value='NULL')
test_data_filled['ConfirmedCases'] = pred_data['ConfirmedCases']
test_data_filled['Fatalities'] = pred_data['Fatalities']
for idx, (k, v) in enumerate(train_data.groupby(['Province_State', 'Country_Region'])):
    print(idx, k, end=' ')
    # Per-country curve-shape parameters (b) for cases and fatalities.
    b_cc, b_f = 3, 2
    if k[1] == 'China':
        b_cc = 1
        b_f = 1
        if k[0] == 'Hong Kong':
            b_cc = 3
    elif k[1] == 'Italy':
        b_cc = 5
        b_f = 3
    elif k[1] == 'US':
        b_cc = 4
        b_f = 2
    elif k[1] == 'Spain':
        b_cc = 4
        b_f = 3
    # Confirmed cases: fit, overwrite the fitted prefix with observed values,
    # then keep only the submission window.
    hist_num = v.loc[:, 'ConfirmedCases'].tolist()
    ret = func_fitting(hist_num, y_scale=max(1000, np.max(hist_num)),
                       b=b_cc, start_pred=start_pred, PN=10, MAXN=len(hist_num) + 30)
    ret = list(ret)
    real_cc = np.round(np.array(ret[1]))
    pred_cc = np.round(np.array(ret[5]))
    for i in range(len(real_cc)):
        pred_cc[i] = real_cc[i]
    pred_cc = pred_cc[start_submit:]
    print(pred_cc)
    # Fatalities: same procedure with the fatality-specific b parameter.
    hist_num = v.loc[:, 'Fatalities'].tolist()
    ret = func_fitting(hist_num, y_scale=max(1000, np.max(hist_num)),
                       b=b_f, start_pred=start_pred, PN=10, MAXN=len(hist_num) + 30)
    ret = list(ret)
    real_f = np.round(np.array(ret[1]))
    pred_f = np.round(np.array(ret[5]))
    for i in range(len(real_f)):
        pred_f[i] = real_f[i]
    pred_f = pred_f[start_submit:]
    print(pred_f)
    # Heuristic floor on fatalities outside China once cases are substantial.
    for i in range(14, len(pred_cc)):
        if pred_f[i] < 20 and pred_cc[i] > 200 and k[1] != 'China':
            pred_f[i] = pred_cc[i] * 0.01 * np.log10(pred_cc[i])
    print(pred_f)
    print(pred_cc[-1], pred_f[-1])
    # Write predictions into the matching test rows by region and date.
    for i in range(len(pred_cc)):
        index = (test_data_filled['Province_State'] == k[0]) & \
                (test_data_filled['Country_Region'] == k[1]) & \
                (test_data_filled['Date'] == test_date_list[i])
        test_data_filled.loc[index, 'ConfirmedCases'] = pred_cc[i]
        test_data_filled.loc[index, 'Fatalities'] = pred_f[i]
# Re-align row indices of the two weather-station frames, then join them
# column-wise for both train and test.
train_station_1 = train_station_1.reset_index(drop=True)
train_station_2 = train_station_2.reset_index(drop=True)
train_merge = pd.concat([train_station_1, train_station_2], axis=1)
test_station_1 = test_station_1.reset_index(drop=True)
test_station_2 = test_station_2.reset_index(drop=True)
test_merge = pd.concat([test_station_1, test_station_2], axis=1)
West Nile Virus Prediction
171,635
# Keep only the columns required by the competition submission format.
submission = test_data_filled[['ForecastId', 'ConfirmedCases', 'Fatalities']]
# Extract the target vector; pop() also removes the column from the features.
labels = train_merge.pop('WnvPresent').values
West Nile Virus Prediction
171,635
# Write the submission file and preview the first 500 rows.
submission.to_csv("submission.csv", index=False)
submission.head(500)
# The per-station date columns are redundant after the merge — drop them.
date_cols = ['stat_1_Date', 'stat_2_Date']
train_merge = train_merge.drop(date_cols, axis=1)
test_merge = test_merge.drop(date_cols, axis=1)
West Nile Virus Prediction
171,635
# Notebook display setup: inline figures, echo every expression result, and
# widen pandas' display limits. (`%matplotlib inline` is IPython magic, so this
# cell only runs inside a notebook.)
%matplotlib inline InteractiveShell.ast_node_interactivity = "all" pd.set_option('display.max_columns', 99) pd.set_option('display.max_rows', 99) <set_options>
# Coarse location features: truncate latitude/longitude to integers.
for frame in (train_merge, test_merge):
    frame['Lat_int'] = frame.Latitude.astype(int)
    frame['Long_int'] = frame.Longitude.astype(int)
West Nile Virus Prediction
171,635
# Plotting defaults: large figures, readable fonts, 20-colour palette.
plt.rcParams['figure.figsize'] = [16, 10]
plt.rcParams['font.size'] = 14
sns.set_palette(sns.color_palette('tab20', 20))
# One-hot encode the categorical columns, one column at a time (matching the
# original call order so the resulting column layout is unchanged).
categorical_cols = ['Species', 'Block', 'Street', 'Trap']
for col in categorical_cols:
    train_merge = pd.get_dummies(train_merge, columns=[col])
for col in categorical_cols:
    test_merge = pd.get_dummies(test_merge, columns=[col])
West Nile Virus Prediction
171,635
# Fixed: the input paths contained a stray space ('.. /input/...'), which would
# make every read_csv fail. They now point at '../input/...'.
COMP = '../input/covid19-global-forecasting-week-2'
DATEFORMAT = '%Y-%m-%d'


def get_comp_data(COMP):
    """Load the competition train/test/submission CSVs and add derived columns.

    Adds a 'Location' key (country + province), log-transformed targets,
    country metadata (merged from country_codes.csv), and parsed datetimes.
    Returns (train, test, submission).
    """
    train = pd.read_csv(f'{COMP}/train.csv')
    test = pd.read_csv(f'{COMP}/test.csv')
    submission = pd.read_csv(f'{COMP}/submission.csv')
    print(train.shape, test.shape, submission.shape)
    # Commas in country names would break the 'Location' key downstream.
    train['Country_Region'] = train['Country_Region'].str.replace(',', '')
    test['Country_Region'] = test['Country_Region'].str.replace(',', '')
    train['Location'] = train['Country_Region'] + '-' + train['Province_State'].fillna('')
    test['Location'] = test['Country_Region'] + '-' + test['Province_State'].fillna('')
    train['LogConfirmed'] = to_log(train.ConfirmedCases)
    train['LogFatalities'] = to_log(train.Fatalities)
    train = train.drop(columns=['Province_State'])
    test = test.drop(columns=['Province_State'])
    country_codes = pd.read_csv('../input/covid19-metadata/country_codes.csv', keep_default_na=False)
    train = train.merge(country_codes, on='Country_Region', how='left')
    test = test.merge(country_codes, on='Country_Region', how='left')
    train['DateTime'] = pd.to_datetime(train['Date'])
    test['DateTime'] = pd.to_datetime(test['Date'])
    return train, test, submission


def process_each_location(df):
    """Per Location: enforce monotone cumulative counts, then add next-day
    shifted columns and day-over-day deltas (raw and log scale)."""
    dfs = []
    for loc, df in tqdm(df.groupby('Location')):
        df = df.sort_values(by='Date')
        # cummax guards against days where the raw feed decreases.
        df['Fatalities'] = df['Fatalities'].cummax()
        df['ConfirmedCases'] = df['ConfirmedCases'].cummax()
        df['LogFatalities'] = df['LogFatalities'].cummax()
        df['LogConfirmed'] = df['LogConfirmed'].cummax()
        df['LogConfirmedNextDay'] = df['LogConfirmed'].shift(-1)
        df['ConfirmedNextDay'] = df['ConfirmedCases'].shift(-1)
        df['DateNextDay'] = df['Date'].shift(-1)
        df['LogFatalitiesNextDay'] = df['LogFatalities'].shift(-1)
        df['FatalitiesNextDay'] = df['Fatalities'].shift(-1)
        df['LogConfirmedDelta'] = df['LogConfirmedNextDay'] - df['LogConfirmed']
        df['ConfirmedDelta'] = df['ConfirmedNextDay'] - df['ConfirmedCases']
        df['LogFatalitiesDelta'] = df['LogFatalitiesNextDay'] - df['LogFatalities']
        df['FatalitiesDelta'] = df['FatalitiesNextDay'] - df['Fatalities']
        dfs.append(df)
    return pd.concat(dfs)


def add_days(d, k):
    """Parse date string d (DATEFORMAT) and add k days; returns a datetime."""
    return dt.datetime.strptime(d, DATEFORMAT) + dt.timedelta(days=k)


def to_log(x):
    """log1p transform, safe at x == 0."""
    return np.log(x + 1)


def to_exp(x):
    """Inverse of to_log."""
    return np.exp(x) - 1
# The raw 'Station' identifier is no longer needed after merging stations.
train_merge = train_merge.drop('Station', axis=1)
test_merge = test_merge.drop('Station', axis=1)
West Nile Virus Prediction
171,635
# Aggregate the latest training day per country and log10-scale the totals.
train = train.sort_values(by='Date')
group_cols = ['Country_Region', 'continent', 'geo_region', 'country_iso_code_3']
countries_latest_state = (train[train['Date'] == TRAIN_END]
                          .groupby(group_cols)
                          .sum()[['ConfirmedCases', 'Fatalities']]
                          .reset_index())
countries_latest_state['Log10Confirmed'] = np.log10(countries_latest_state.ConfirmedCases + 1)
countries_latest_state['Log10Fatalities'] = np.log10(countries_latest_state.Fatalities + 1)
countries_latest_state = countries_latest_state.sort_values(by='Fatalities', ascending=False)
countries_latest_state.to_csv('countries_latest_state.csv', index=False)
countries_latest_state.shape
countries_latest_state.head()
West Nile Virus Prediction
171,635
# Sanity check: the latest-day value should equal the historical max per
# Location (i.e. cumulative counts never decrease). Show any violations.
latest_loc = train[train['Date'] == TRAIN_END][['Location', 'ConfirmedCases', 'Fatalities']]
max_loc = train.groupby(['Location'])[['ConfirmedCases', 'Fatalities']].max().reset_index()
check = pd.merge(latest_loc, max_loc, on='Location')
np.mean(check.ConfirmedCases_x == check.ConfirmedCases_y)
np.mean(check.Fatalities_x == check.Fatalities_y)
check[check.Fatalities_x != check.Fatalities_y]
check[check.ConfirmedCases_x != check.ConfirmedCases_y]
# Columns that exist only in the test features (e.g. dummy levels unseen in train).
unique_test_cols = [c for c in test_merge.columns if c not in train_merge.columns]
West Nile Virus Prediction
171,635
# Aggregates cases/fatalities per (DateTime, continent) and adds log10 columns.
# NOTE(review): the final filter's string literal was truncated by extraction
# (`continent != '<merge>`) — the original presumably excluded a placeholder
# continent value; recover it from the source notebook before running.
regional_progress = train_clean.groupby(['DateTime', 'continent'] ).sum() [['ConfirmedCases', 'Fatalities']].reset_index() regional_progress['Log10Confirmed'] = np.log10(regional_progress.ConfirmedCases + 1) regional_progress['Log10Fatalities'] = np.log10(regional_progress.Fatalities + 1) regional_progress = regional_progress[regional_progress.continent != '<merge>
# Drop test-only columns so train and test share an identical feature set.
test_merge = test_merge.drop(unique_test_cols, axis=1)
West Nile Virus Prediction
171,635
# Growth over March: ratio of end-of-month to start-of-month confirmed cases
# (with +1 to avoid division by zero), for countries above 200 cases.
cols = ['Country_Region', 'ConfirmedCases', 'Fatalities']
countries_0301 = country_progress[country_progress.Date == '2020-03-01'][cols]
countries_0331 = country_progress[country_progress.Date == '2020-03-31'][cols]
countries_in_march = pd.merge(countries_0301, countries_0331,
                              on='Country_Region', suffixes=['_0301', '_0331'])
countries_in_march['IncreaseInMarch'] = (
    countries_in_march.ConfirmedCases_0331 / (countries_in_march.ConfirmedCases_0301 + 1))
countries_in_march = countries_in_march[
    countries_in_march.ConfirmedCases_0331 > 200
].sort_values(by='IncreaseInMarch', ascending=False)
countries_in_march.tail(15)
West Nile Virus Prediction
171,635
# Builds a wide table of daily confirmed deltas per location and saves it.
# NOTE(review): this cell is corrupted by extraction — several string literals
# beginning with 'Geo were truncated (likely a 'Geo...' key column used for the
# pivot/merge). Restore the full column names from the source notebook; as
# written this line is not valid Python.
train_clean['Geo latest = train_clean[train_clean.Date == '2020-03-31'][[ 'Geo daily_confirmed_deltas = train_clean[train_clean.Date >= '2020-03-17'].pivot( 'Geo daily_confirmed_deltas = latest.merge(daily_confirmed_deltas, on='Geo daily_confirmed_deltas.shape daily_confirmed_deltas.head() daily_confirmed_deltas.to_csv('daily_confirmed_deltas.csv', index=False )<save_to_csv>
# Baseline classifier: a large random forest with a fixed seed.
clf = ensemble.RandomForestClassifier(n_estimators=1000,
                                      min_samples_split=2,
                                      random_state=42)
clf.fit(train_merge, labels)
West Nile Virus Prediction
171,635
# Day-over-day log-growth statistics, excluding China and low-count rows,
# bucketed by the rounded log-confirmed level.
deltas = train_clean[np.logical_and(
    train_clean.LogConfirmed > 2,
    ~train_clean.Location.str.startswith('China')
)].dropna().sort_values(by='LogConfirmedDelta', ascending=False)
deltas['start'] = deltas['LogConfirmed'].round(0)
grouped = deltas.groupby('start')[['LogConfirmedDelta']]
confirmed_deltas = pd.concat([grouped.mean(), grouped.std(), grouped.count()], axis=1)
deltas.mean()
confirmed_deltas.columns = ['avg', 'std', 'cnt']
confirmed_deltas
confirmed_deltas.to_csv('confirmed_deltas.csv')
# Positive-class probability of WNV presence from the random forest.
predictions_randfor = clf.predict_proba(test_merge)[:, 1]
West Nile Virus Prediction
171,635
# Daily log-growth (DELTA) assumptions for confirmed cases, decayed over time.
DECAY = 0.93
DECAY ** 7, DECAY ** 14, DECAY ** 21, DECAY ** 28
confirmed_deltas = train.groupby(['Location', 'Country_Region', 'continent'])[['Id']].count().reset_index()
GLOBAL_DELTA = 0.11
confirmed_deltas['DELTA'] = GLOBAL_DELTA
# Continent-level defaults first, then per-country and per-location overrides
# (same assignment order as the original hand-written list).
confirmed_deltas.loc[confirmed_deltas.continent == 'Africa', 'DELTA'] = 0.14
confirmed_deltas.loc[confirmed_deltas.continent == 'Oceania', 'DELTA'] = 0.06
_country_delta = {
    'Korea South': 0.011, 'US': 0.15, 'China': 0.01, 'Japan': 0.05,
    'Singapore': 0.05, 'Taiwan*': 0.05, 'Switzerland': 0.05, 'Norway': 0.05,
    'Iceland': 0.05, 'Austria': 0.06, 'Italy': 0.04, 'Spain': 0.08,
    'Portugal': 0.12, 'Israel': 0.12, 'Iran': 0.08, 'Germany': 0.07,
    'Malaysia': 0.06, 'Russia': 0.18, 'Ukraine': 0.18, 'Brazil': 0.12,
    'Turkey': 0.18, 'Philippines': 0.18,
}
for _country, _delta in _country_delta.items():
    confirmed_deltas.loc[confirmed_deltas.Country_Region == _country, 'DELTA'] = _delta
_location_delta = {
    'France-': 0.1, 'United Kingdom-': 0.12, 'Diamond Princess-': 0.00,
    'China-Hong Kong': 0.08, 'San Marino-': 0.03,
}
for _loc, _delta in _location_delta.items():
    confirmed_deltas.loc[confirmed_deltas.Location == _loc, 'DELTA'] = _delta
confirmed_deltas.shape, confirmed_deltas.DELTA.mean()
confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA].shape, confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA].DELTA.mean()
confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA]
confirmed_deltas.describe()
# Fit a default XGBoost classifier and plot its feature importances.
xgbc = xgb.XGBClassifier(seed=42)
xgbc.fit(train_merge, labels)
fig, ax = pyplot.subplots(figsize=(10, 15))
xgb.plot_importance(xgbc, ax=ax)
West Nile Virus Prediction
171,635
# Roll the log-confirmed table forward day by day: each new day adds the
# location's DELTA, geometrically decayed by DECAY ** i.
daily_log_confirmed = train_clean.pivot('Location', 'Date', 'LogConfirmed').reset_index()
daily_log_confirmed = daily_log_confirmed.sort_values(TRAIN_END, ascending=False)
daily_log_confirmed.to_csv('daily_log_confirmed.csv', index=False)
for i, d in tqdm(enumerate(pd.date_range(add_days(TRAIN_END, 1), add_days(TEST_END, 1)))):
    new_day = str(d).split(' ')[0]
    last_day = dt.datetime.strptime(new_day, DATEFORMAT) - dt.timedelta(days=1)
    last_day = last_day.strftime(DATEFORMAT)
    for loc in confirmed_deltas.Location.values:
        confirmed_delta = confirmed_deltas.loc[confirmed_deltas.Location == loc, 'DELTA'].values[0]
        daily_log_confirmed.loc[daily_log_confirmed.Location == loc, new_day] = \
            daily_log_confirmed.loc[daily_log_confirmed.Location == loc, last_day] + \
            confirmed_delta * DECAY ** i
# Display feature-importance scores.
# NOTE(review): `get_fscore` belongs to the underlying Booster; the sklearn
# wrapper may require `xgbc.get_booster().get_fscore()` on newer xgboost —
# confirm against the installed version.
xgbc.get_fscore()
West Nile Virus Prediction
171,635
# Builds a wide table of daily death deltas per location and saves it.
# NOTE(review): corrupted by extraction — string literals beginning with 'Geo
# were truncated (same key column as the confirmed-deltas cell). Restore the
# full names from the source notebook; as written this line is not valid Python.
train_clean['Geo latest = train_clean[train_clean.Date == TRAIN_END][[ 'Geo daily_death_deltas = train_clean[train_clean.Date >= '2020-03-17'].pivot( 'Geo daily_death_deltas = latest.merge(daily_death_deltas, on='Geo daily_death_deltas.shape daily_death_deltas.head() daily_death_deltas.to_csv('daily_death_deltas.csv', index=False )<feature_engineering>
def calc_roc_auc(y, predict_probs):
    """Return the ROC AUC for binary labels `y` given predicted probabilities."""
    fpr, tpr, thresholds = metrics.roc_curve(y, predict_probs)
    return metrics.auc(fpr, tpr)
West Nile Virus Prediction
171,635
# Daily log-growth (DELTA) assumptions for fatalities, mirroring the
# confirmed-cases table but with death-specific values.
death_deltas = train.groupby(['Location', 'Country_Region', 'continent'])[['Id']].count().reset_index()
GLOBAL_DELTA = 0.11
death_deltas['DELTA'] = GLOBAL_DELTA
death_deltas.loc[death_deltas.Country_Region == 'China', 'DELTA'] = 0.005
death_deltas.loc[death_deltas.continent == 'Oceania', 'DELTA'] = 0.08
_country_delta = {
    'Korea South': 0.04, 'Japan': 0.04, 'Singapore': 0.05, 'Taiwan*': 0.06,
    'US': 0.17, 'Switzerland': 0.15, 'Norway': 0.15, 'Iceland': 0.01,
    'Austria': 0.14, 'Italy': 0.07, 'Spain': 0.1, 'Portugal': 0.13,
    'Israel': 0.16, 'Iran': 0.06, 'Germany': 0.14, 'Malaysia': 0.14,
    'Russia': 0.2, 'Ukraine': 0.2, 'Brazil': 0.2, 'Turkey': 0.22,
    'Philippines': 0.12,
}
for _country, _delta in _country_delta.items():
    death_deltas.loc[death_deltas.Country_Region == _country, 'DELTA'] = _delta
_location_delta = {
    'France-': 0.14, 'United Kingdom-': 0.14, 'Diamond Princess-': 0.00,
    'China-Hong Kong': 0.01, 'San Marino-': 0.05,
}
for _loc, _delta in _location_delta.items():
    death_deltas.loc[death_deltas.Location == _loc, 'DELTA'] = _delta
death_deltas.shape
death_deltas.DELTA.mean()
death_deltas[death_deltas.DELTA != GLOBAL_DELTA].shape
death_deltas[death_deltas.DELTA != GLOBAL_DELTA].DELTA.mean()
death_deltas[death_deltas.DELTA != GLOBAL_DELTA]
death_deltas.describe()
# Stratified hold-out split for validation (1/3 of the data).
train_split, val_split, label_train_split, label_val_split = model_selection.train_test_split(
    train_merge, labels, test_size=0.33, random_state=42, stratify=labels)
West Nile Virus Prediction
171,635
# Roll the log-fatalities table forward day by day, analogous to the
# confirmed-cases extrapolation (DELTA decayed by DECAY ** i).
daily_log_deaths = train_clean.pivot('Location', 'Date', 'LogFatalities').reset_index()
daily_log_deaths = daily_log_deaths.sort_values(TRAIN_END, ascending=False)
daily_log_deaths.to_csv('daily_log_deaths.csv', index=False)
for i, d in tqdm(enumerate(pd.date_range(add_days(TRAIN_END, 1), add_days(TEST_END, 1)))):
    new_day = str(d).split(' ')[0]
    last_day = dt.datetime.strptime(new_day, DATEFORMAT) - dt.timedelta(days=1)
    last_day = last_day.strftime(DATEFORMAT)
    for loc in death_deltas.Location:
        death_delta = death_deltas.loc[death_deltas.Location == loc, 'DELTA'].values[0]
        daily_log_deaths.loc[daily_log_deaths.Location == loc, new_day] = \
            daily_log_deaths.loc[daily_log_deaths.Location == loc, last_day] + \
            death_delta * DECAY ** i
def select_features_by_importance_threshold(model, X_train, y_train, selection_model,
                                            X_test, y_test, minimum=False):
    """Sweep feature-importance thresholds; for each, keep the features at or
    above the threshold, refit `selection_model` on them, and print the
    validation AUC.

    minimum: if truthy, only importances strictly above it are used as
        thresholds (plus an explicit 0.0 baseline).
        NOTE(review): False doubles as the "no minimum" sentinel, so a caller
        cannot request minimum=0.0 explicitly — kept for compatibility.

    Fixes vs original: removed a no-op `selection_model = selection_model`
    self-assignment and a redundant `y_pred`/`predictions` alias.
    """
    if minimum:
        thresholds = np.unique(model.feature_importances_[model.feature_importances_ > minimum])
        thresholds = np.insert(thresholds, 0, 0.)
    else:
        thresholds = np.unique(model.feature_importances_)
    print(thresholds)
    for thresh in thresholds:
        selection = feature_selection.SelectFromModel(model, threshold=thresh, prefit=True)
        select_X_train = selection.transform(X_train)
        selection_model.fit(select_X_train, y_train)
        select_X_test = selection.transform(X_test)
        predictions = selection_model.predict_proba(select_X_test)[:, 1]
        auc = calc_roc_auc(y_test, predictions)
        print("Thresh=%.3f, n=%d, AUC: %.2f%%" % (thresh, select_X_train.shape[1], auc))
West Nile Virus Prediction
171,635
# Look up each test row's extrapolated log values and convert back from
# log scale into the submission lists.
confirmed = []
fatalities = []
for forecast_id, d, loc in tqdm(test[['ForecastId', 'Date', 'Location']].values):
    c = to_exp(daily_log_confirmed.loc[daily_log_confirmed.Location == loc, d].values[0])
    f = to_exp(daily_log_deaths.loc[daily_log_deaths.Location == loc, d].values[0])
    confirmed.append(c)
    fatalities.append(f)
# Keep only features whose importance exceeds the fixed 0.023 threshold.
sfm = feature_selection.SelectFromModel(xgbc, threshold=0.023, prefit=True)
sfm_train = sfm.transform(train_merge)
n_features = sfm_train.shape[1]
print(n_features)
West Nile Virus Prediction
171,635
# Assemble the submission frame from the collected per-row forecasts.
my_submission = test.copy()
my_submission['ConfirmedCases'] = confirmed
my_submission['Fatalities'] = fatalities
my_submission.shape
my_submission.head()
# Refit XGBoost on the reduced (importance-selected) feature matrix.
xgb_clf = xgb.XGBClassifier(seed=42)
xgb_clf.fit(sfm_train, labels)
West Nile Virus Prediction
171,635
# Write the final submission and preview it.
my_submission[['ForecastId', 'ConfirmedCases', 'Fatalities']].to_csv('submission.csv', index=False)
print(DECAY)
my_submission.head()
my_submission.tail()
my_submission.shape
# Apply the same feature selection to the test set, then predict the
# positive-class probability.
sfm_test = sfm.transform(test_merge)
predictions_xgb = xgb_clf.predict_proba(sfm_test)[:, 1]
West Nile Virus Prediction
171,635
# Report total notebook runtime (assumes `start` was set at the top).
end = dt.datetime.now()
print('Finished', end, (end - start).seconds, 's')
# Train with AUC tracked on both the training and validation splits.
X_train = train_split
X_test = val_split
y_train = label_train_split
y_test = label_val_split
model = xgb.XGBClassifier(seed=42)
eval_set = [(X_train, y_train), (X_test, y_test)]
model.fit(X_train, y_train, eval_metric="auc", eval_set=eval_set, verbose=True)
West Nile Virus Prediction
171,635
# Progress marker: libraries have been imported at this point.
print("Read in libraries")
# Retrieve the per-iteration evaluation metrics recorded during training.
results = model.evals_result()
print(results)
West Nile Virus Prediction
171,635
# Load only the columns needed downstream.
print("read in train file")
df = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/train.csv",
                 usecols=['Province_State', 'Country_Region', 'Date',
                          'ConfirmedCases', 'Fatalities'])
# Refit with early stopping on the validation AUC (patience of 10 rounds).
eval_set = [(X_test, y_test)]
model.fit(X_train, y_train, eval_metric=["auc"], eval_set=eval_set,
          early_stopping_rounds=10)
results = model.evals_result()
print(results)
West Nile Virus Prediction
171,635
# Build a single region key ('Lat' = province + country) and drop the parts.
print("fill blanks and add region for counting")
df.fillna(' ', inplace=True)
df['Lat'] = df['Province_State'] + df['Country_Region']
df.drop('Province_State', axis=1, inplace=True)
df.drop('Country_Region', axis=1, inplace=True)
def report(results, n_top=3):
    """Print the top-ranked parameter settings from a CV results dict
    (as produced by RandomizedSearchCV.cv_results_)."""
    for rank in range(1, n_top + 1):
        for candidate in np.flatnonzero(results['rank_test_score'] == rank):
            print("Model with rank: {0}".format(rank))
            print("Mean validation score: {0:.3f}(std: {1:.3f})".format(
                results['mean_test_score'][candidate],
                results['std_test_score'][candidate]))
            print("Parameters: {0}".format(results['params'][candidate]))
            print("")
West Nile Virus Prediction
171,635
# Split the frame into one sub-frame per region key, and load the test set.
countries_list = df.Lat.unique()
df1 = [df[df['Lat'] == region] for region in countries_list]
print("we have " + str(len(df1)) + " regions in our dataset")
test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/test.csv")
# Hyper-parameter search space: tree count sampled uniformly from [1, 300),
# learning rate drawn from a fixed grid.
n_estimators_dist = sps.randint(1, 300)
learning_rate_dist = [0.01, 0.02, 0.05, 0.1, 0.2, 0.3]
West Nile Virus Prediction
171,635
def _sarimax_forecast(series):
    """Forecast 35 additional steps with SARIMAX(1,1,0)x(1,1,0,12).

    On any fitting failure, fall back to the last 9 observed values plus 34
    copies of double the final value — preserving the original notebook's
    fallback behaviour. (Original used a bare `except:`, which also swallowed
    KeyboardInterrupt/SystemExit; narrowed to `except Exception`. The
    duplicated confirmed/fatalities code is factored into this helper.)
    """
    data = series.astype('int32').tolist()
    try:
        model = SARIMAX(data, order=(1, 1, 0), seasonal_order=(1, 1, 0, 12),
                        measurement_error=True)
        model_fit = model.fit(disp=False)
        predicted = model_fit.predict(len(data), len(data) + 34)
        new = np.concatenate((np.array(data),
                              np.array([int(num) for num in predicted])), axis=0)
        return list(new[-43:])
    except Exception:
        out = list(data[-10:-1])
        out.extend([data[-1] * 2] * 34)
        return out


submit_confirmed = []
submit_fatal = []
for region_df in df1:
    submit_confirmed.extend(_sarimax_forecast(region_df.ConfirmedCases))
    submit_fatal.extend(_sarimax_forecast(region_df.Fatalities))
# Random search over the learning-rate / n_estimators space, scored by AUC.
param_dist = dict(learning_rate=learning_rate_dist,
                  n_estimators=n_estimators_dist)
n_iter_search = 20
random_search = model_selection.RandomizedSearchCV(
    model, param_distributions=param_dist, n_iter=n_iter_search, scoring='roc_auc')
start = time()
random_search.fit(X_train, y_train)
print("RandomizedSearchCV took %.2f seconds for %d candidates parameter settings."
      % ((time() - start), n_iter_search))
report(random_search.cv_results_)
West Nile Virus Prediction
171,635
<merge><EOS>
# Fill the sample submission with the XGBoost probabilities and save.
sample_sub['WnvPresent'] = predictions_xgb
sample_sub.to_csv('sub_xgb.csv', index=False)
West Nile Virus Prediction
149,246
<SOS> metric: SantaWeightedBins Kaggle data source: santas-uncertain-bags<save_to_csv>
# IPython magic: render matplotlib figures inline in the notebook.
%matplotlib inline
Santa's Uncertain Bags
149,246
# Save the submission (with explicit header names) and the full test frame.
df_submit.to_csv('submission.csv',
                 header=['ForecastId', 'ConfirmedCases', 'Fatalities'],
                 index=False)
complete_test.to_csv('complete_test.csv', index=False)
# Weight samplers for each gift type (distributions given by the competition).
# Each returns `size` non-negative draws; RNG call order matches the original.
def sample_horse(size=1):
    return np.maximum(0, np.random.normal(5, 2, size))

def sample_ball(size=1):
    return np.maximum(0, 1 + np.random.normal(1, 0.3, size))

def sample_bike(size=1):
    return np.maximum(0, np.random.normal(20, 10, size))

def sample_train(size=1):
    return np.maximum(0, np.random.normal(10, 5, size))

def sample_coal(size=1):
    return 47 * np.random.beta(0.5, 0.5, size)

def sample_book(size=1):
    return np.random.chisquare(2, size)

def sample_doll(size=1):
    return np.random.gamma(5, 1, size)

def sample_block(size=1):
    return np.random.triangular(5, 10, 20, size)

def sample_gloves(size=1):
    # 30% of gloves are "heavy" (3..4); the rest are light (0..1).
    heavy = 3.0 + np.random.rand(size)
    light = np.random.rand(size)
    is_heavy = np.random.rand(size) < 0.3
    light[is_heavy] = heavy[is_heavy]
    return light

samplers = {
    "horse": sample_horse,
    "ball": sample_ball,
    "bike": sample_bike,
    "train": sample_train,
    "coal": sample_coal,
    "book": sample_book,
    "doll": sample_doll,
    "blocks": sample_block,
    "gloves": sample_gloves,
}

def sample(gift, quantity=1, size=1):
    """Total weight of `quantity` gifts of one type, sampled `size` times."""
    draws = samplers[gift](quantity * size).reshape(quantity, size)
    return np.sum(draws, axis=0)

print(sample("horse", 2, 10))
Santa's Uncertain Bags
149,246
# Load the competition train and test sets.
train_data = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/train.csv")
test_data = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/test.csv")
def bag_name(bag):
    """Human-readable name for a bag, e.g. "['ball(2)', 'horse(1)']"."""
    return str(["{}({})".format(gift, bag[gift]) for gift in sorted(bag.keys())])


def create_bag_weight_sampler(bag):
    """Return (sampler, name) where sampler(size) draws total bag weights."""
    def bag_weight_sampler(size=1):
        weight = np.array([0.0] * size)
        for gift in sorted(bag.keys()):
            weight += sample(gift, bag[gift], size)
        return weight
    return bag_weight_sampler, bag_name(bag)


bag = {"horse": 1, "ball": 2}
bag_weight_sampler, name = create_bag_weight_sampler(bag)
print("Sampling from bag {}: {}".format(name, bag_weight_sampler(3)))
Santa's Uncertain Bags
149,246
# Where Province_State is missing, substitute the country name so every row
# has a usable region key. Vectorised replacement for the original row-by-row
# loop, which used chained indexing (`df[col][row] = ...`) — a pattern that
# is slow and raises SettingWithCopyWarning.
missing = train_data['Province_State'].isna()
train_data.loc[missing, 'Province_State'] = train_data.loc[missing, 'Country_Region']
np.random.seed(42)


def get_gift_weight_distributions(gifts, size=10000):
    """Monte-Carlo estimate of (mean, variance) of each gift's weight.

    NOTE(review): the second column is the *variance* (np.var), though the
    display below labels it "std" — kept as-is for compatibility.
    """
    def get_gift_weight_dsitribution(gift):
        draws = samplers[gift](size)
        return np.mean(draws), np.var(draws)

    distributions = np.zeros((len(gifts), 2))
    for i, gift in enumerate(gifts):
        distributions[i, :] = get_gift_weight_dsitribution(gift)
    return distributions


gifts = sorted(samplers.keys())
print("Canonical gift order: {} ".format(gifts))
gift_weight_distributions = get_gift_weight_distributions(gifts)
print(pd.DataFrame(data=gift_weight_distributions, index=gifts, columns=["mean", "std"]))
Santa's Uncertain Bags
149,246
# Same missing-province backfill for the test set (vectorised replacement for
# the chained-indexing row loop).
missing = test_data['Province_State'].isna()
test_data.loc[missing, 'Province_State'] = test_data.loc[missing, 'Country_Region']
def create_candidate_bags(max_quantities):
    """Enumerate every bag with 0..max_quantity-1 of each gift (cartesian
    product of per-gift count ranges); returns one row per candidate bag."""
    gift_counts = [np.arange(max_quantity) for max_quantity in max_quantities]
    return cartesian(gift_counts)


mixed_item_candiadte_bags = create_candidate_bags(mixed_item_max_quantities)
print("Created candiadate bags: {}".format(mixed_item_candiadte_bags.shape))
Santa's Uncertain Bags
149,246
# Number of daily observations per region, measured on one fully-covered state.
day_number = len(train_data[train_data['Province_State'] == 'Florida'])
day_number
def get_bag_weight_distributions(candidate_bags, min_mean=30, max_mean=50):
    """Bag weight (mean, variance) as a linear combination of gift stats."""
    return np.dot(candidate_bags, gift_weight_distributions)


def mean_of(distributions):
    """First column = mean of each distribution row."""
    return distributions[:, 0]


def filter_by_mean(bags, distributions, min_mean=30, max_mean=50):
    """Keep only bags whose expected weight lies in (min_mean, max_mean)."""
    keep = mean_of(distributions) > min_mean
    bags, distributions = bags[keep], distributions[keep]
    keep = mean_of(distributions) < max_mean
    return bags[keep], distributions[keep]


mixed_item_bag_weight_distributions = get_bag_weight_distributions(mixed_item_candiadte_bags)
mixed_item_candiadte_bags, mixed_item_bag_weight_distributions = \
    filter_by_mean(mixed_item_candiadte_bags, mixed_item_bag_weight_distributions)
print("Candidate bags left: {}".format(mixed_item_candiadte_bags.shape))
Santa's Uncertain Bags
149,246
# Number of regions = total rows divided by days per region.
country_number = len(train_data) / day_number
country_number
def get_low_weight_item_candidate_bags():
    """Extra candidate bags dominated by one light gift type (ball/book/gloves),
    since the mixed-item enumeration caps those counts too low."""
    bags = []
    distributions = []
    for gift in ["ball", "book", "gloves"]:
        max_quantities = get_low_weight_item_bags_max_quantities_for(gift)
        candiadte_bags = create_candidate_bags(max_quantities)
        bags.append(candiadte_bags)
        distributions.append(get_bag_weight_distributions(candiadte_bags))
    return np.vstack(bags), np.vstack(distributions)


def get_low_weight_item_bags_max_quantities_for(gift):
    """Allow up to 50/mean of the focus gift; cap all other gifts at 5."""
    max_quantities = np.ceil(50 / gift_weight_distributions[:, 0])
    gift_index = gifts.index(gift)
    for i in range(len(max_quantities)):
        if i != gift_index:
            max_quantities[i] = 5
    print("Gift {}: number of different bags: {}".format(gift, np.prod(max_quantities)))
    return max_quantities


low_weight_item_candidate_bags, low_weight_item_bag_weight_distributions = \
    filter_by_mean(*get_low_weight_item_candidate_bags())
print("Total number of canidate bags: {}".format(low_weight_item_candidate_bags.shape))
Santa's Uncertain Bags
149,246
# Overwrites Province_State with a numeric state code, one block of
# `day_number` rows per region. NOTE(review): in this collapsed source the
# position of `state_code += 1` (inside the inner per-day loop vs. after it,
# i.e. unique code per row vs. per region) cannot be determined — confirm
# against the source notebook before reformatting. Chained indexing
# (`df[col][i] = ...`) is also SettingWithCopy-prone; prefer .loc once the
# intended semantics are confirmed.
state_code = 0 train_data['Province_State'][0] = 0 for instance in range(0, int(country_number)) : for state_index in range(0, day_number): train_data['Province_State'][state_index +(day_number * instance)] = state_code state_code += 1<filter>
def drop_duplicate(candidate_bags, distributions):
    """Remove duplicate bags (same gift counts), keeping the first occurrence.
    Returns the de-duplicated (bags, distributions) arrays."""
    df = pd.DataFrame(data=np.hstack((candidate_bags, distributions)),
                      columns=gifts + ["mean", "std"])
    df.drop_duplicates(subset=gifts, inplace=True)
    return df[gifts].values, df[["mean", "std"]].values


candidate_bags = np.vstack([mixed_item_candiadte_bags, low_weight_item_candidate_bags])
bag_weight_distributions = np.vstack([mixed_item_bag_weight_distributions,
                                      low_weight_item_bag_weight_distributions])
print("Combined candiadte bags: {}".format(candidate_bags.shape))
candidate_bags, bag_weight_distributions = drop_duplicate(candidate_bags, bag_weight_distributions)
print("Final candidate bags without duplicates: {}".format(candidate_bags.shape))
Santa's Uncertain Bags
149,246
# Inspect all rows for one country.
train_data.loc[train_data['Country_Region'] == 'Turkey']
def get_bag_utility_distributions(candidate_bags):
    """Monte-Carlo (mean, variance) of utility for every candidate bag,
    printing a progress fraction every 7000 bags."""
    distributions = []
    total = len(candidate_bags)
    for i, candidate_bag in enumerate(candidate_bags):
        if i % 7000 == 0:
            sys.stdout.write("{:.4f}\r".format(float(i) / float(total)))
        distributions.append(get_bag_utility_distribution(candidate_bag))
    print("")
    return np.vstack(distributions)


def get_bag_utility_distribution(candidate_bag):
    """(mean, variance) of one bag's sampled utility (10k draws)."""
    bag = {gifts[i]: int(candidate_bag[i])
           for i in range(len(gifts)) if candidate_bag[i] > 0}
    sampler, name = create_bag_utility_sampler(bag)
    draws = sampler(10000)
    return np.mean(draws), np.var(draws)


bag_utility_distributions = get_bag_utility_distributions(candidate_bags)
print(bag_utility_distributions.shape)
Santa's Uncertain Bags
149,246
# Inspect the date column for one country.
train_data.loc[train_data['Country_Region'] == 'Turkey', 'Date']
# Inventory limits for each gift type (competition constraints).
num_gifts_available = {
    "horse": 1000,
    "ball": 1100,
    "bike": 500,
    "train": 1000,
    "book": 1200,
    "doll": 1000,
    "blocks": 1000,
    "gloves": 200,
    "coal": 166,
}
Santa's Uncertain Bags
149,246
# Days between the last observed training date and the forecast horizon.
time1 = train_data[train_data['Country_Region'] == 'Turkey']['Date'].values[-1]
time2 = '2020-04-30'
FMT = '%Y-%m-%d'
time_diff = datetime.strptime(time2, FMT) - datetime.strptime(time1, FMT)
def pack_linprog(bags, distributions, min_variance, max_bags=1000):
    """Solve one LP relaxation of the packing problem.

    Decision variable x[i] = number of copies of candidate bag i to pack.
    Maximizes total expected utility subject to per-gift stock limits,
    a cap of `max_bags` total bags and, when `min_variance` is given,
    a floor on total variance.  Returns (quantities, infeasible_flag);
    quantities is the LP solution truncated to int64.
    """
    c = - distributions[:,0]  # linprog minimizes, so negate the means
    A_ub = bags.T
    b_ub = np.array([num_gifts_available[gift] for gift in gifts])
    # Extra row: sum of all bag counts <= max_bags.
    A_ub = np.vstack([A_ub, np.ones(A_ub.shape[1])])
    b_ub = np.hstack([b_ub, [max_bags]])
    if min_variance is not None:
        # Encode "total variance >= min_variance" as "-variance <= -min_variance".
        A_ub = np.vstack([A_ub, -distributions[:,1]])
        b_ub = np.hstack([b_ub, [-min_variance]])
    result = linprog(c, A_ub=A_ub, b_ub=b_ub)
    if result["success"] == False:
        return [], True
    else:
        # Truncation toward zero may lose bags; pack_bags compensates below.
        return result["x"].astype('int64'), False


def pack_bags(bags, distributions, min_variance=None):
    """Choose bag quantities totalling (about) 1000 bags via repeated LPs.

    Widens the `max_bags` cap until the truncated LP solution reaches
    1000 bags (bailing out above 1015), then randomly trims surplus bags
    back down to exactly 1000.  Returns (chosen_bags,
    chosen_distributions, chosen_quantities), or three empty lists when
    the LP is infeasible.
    """
    max_bags = 1000
    bag_quantities, infeasible = pack_linprog(bags, distributions, min_variance=min_variance)
    # Integer truncation loses bags, so raise the cap until we have enough.
    while np.sum(bag_quantities)< 1000:
        max_bags += 1
        bag_quantities, infeasible = pack_linprog(bags, distributions, min_variance=min_variance, max_bags=max_bags)
        if max_bags > 1015:
            print("WARNING: not getting 1000 bags")
            break
        if infeasible:
            continue
    if infeasible:
        print("infeasible")
        return [], [], []
    chosen_bag_idx = np.where(bag_quantities)[0]
    chosen_bags = bags[chosen_bag_idx]
    chosen_distributions = distributions[chosen_bag_idx]
    chosen_quantities = bag_quantities[chosen_bag_idx]
    # Randomly shed surplus copies until exactly 1000 bags remain.
    while np.sum(chosen_quantities)> 1000:
        idx = np.random.randint(len(chosen_quantities))
        chosen_quantities[idx] = max(chosen_quantities[idx]-1, 0)
    score_distribution = np.dot(chosen_quantities, chosen_distributions)
    print("{} bags - score distribution: mean = {:.2f} | var = {:.2f}"
          .format(np.sum(chosen_quantities), score_distribution[0], score_distribution[1]))
    return chosen_bags, chosen_distributions, chosen_quantities


# Initial packing with no variance floor.
packed_bags, packed_distributions, packed_quantities \
    = pack_bags(candidate_bags, bag_utility_distributions, min_variance=None)
Santa's Uncertain Bags
149,246
future_day_number = time_diff.days future_day_number<drop_column>
def evaluate_variances():
    """Sweep the variance floor and record the packed score for each value."""
    outcome = {}
    for floor in np.linspace(100000, 410000, num=10):
        _, dists, counts = pack_bags(
            candidate_bags, bag_utility_distributions, min_variance=floor)
        outcome[floor] = np.dot(counts, dists)
    return outcome


scores_for_min_variance = evaluate_variances()
Santa's Uncertain Bags
149,246
<create_dataframe><EOS>
def create_submissions(bags, quantities, num_submissions=60):
    """Write `num_submissions` submission CSVs with randomized gift IDs.

    Each submission assigns concrete gift-ID strings (e.g. "horse_17") to
    the packed bags; the per-gift ID pools are shuffled with a different
    seed per file, so every submission is a distinct random labelling.
    """
    def create_stock(n):
        # Full pool of "gift_i" ID strings per gift type, then shuffled.
        stock = { gift: list(map(lambda id: "{}_{}".format(gift, id),np.arange(num_gifts_available[gift])))for gift in gifts }
        return shuffle(stock, n)
    def shuffle(stock, seed):
        # Seed with the submission index so each file is reproducibly different.
        np.random.seed(seed)
        for gift in stock.keys() :
            np.random.shuffle(stock[gift])
        return stock
    def generate_submission(n):
        stock = create_stock(n)
        with open("submission_{}.csv".format(n), 'w+')as submission_file:
            submission_file.write('Gifts ')
            for i in range(len(bags)) :
                # Emit `quantities[i]` copies of bag i, one bag per write group.
                for quantity in range(quantities[i]):
                    current_gifts = bags[i]
                    for gift_idx, gift_quantity in enumerate(current_gifts[:len(gifts)]):
                        gift_name = gifts[gift_idx]
                        for j in range(int(gift_quantity)) :
                            # pop() hands out IDs from the shuffled pool without reuse.
                            submission_file.write("{} ".format(stock[gift_name].pop()))
                    submission_file.write(" ")
    for n in range(num_submissions):
        generate_submission(n)
create_submissions(packed_bags, packed_quantities )
Santa's Uncertain Bags
66,629
<SOS> metric: MAP@{K} Kaggle data source: facebook-v-predicting-check-ins<define_variables>
import numpy as np import pandas as pd
Facebook V: Predicting Check Ins
66,629
country_number = CC_dataset.shape[0] country_number<create_dataframe>
# Load the Facebook check-in training and test sets.
# FIX: the original paths contained a stray space ('.. /input/...'), which
# cannot resolve on any filesystem; corrected to the standard Kaggle layout.
trainDF = pd.read_csv('../input/train.csv')
testDF = pd.read_csv('../input/test.csv')
Facebook V: Predicting Check Ins
66,629
CC_daily_dataset = pd.DataFrame() Fatality_daily_dataset = pd.DataFrame() for state_id in range(0, country_number): sample_case = CC_dataset.loc[CC_dataset['Province_State'] == state_id] sample_case['Day1CC'] = sample_case['CC1'] sample_fatality = Fatal_dataset.loc[Fatal_dataset['Province_State'] == state_id] sample_fatality['Day1F'] = sample_fatality['F1'] for case_index in range(1, day_number): sample_case['Day' + str(case_index + 1)+ 'CC'] = sample_case['CC' + str(case_index + 1)] - sample_case['CC' + str(case_index)] sample_case = sample_case.drop(['CC' + str(case_index)], axis = 1) sample_fatality['Day' + str(case_index + 1)+ 'F'] = sample_fatality['F' + str(case_index + 1)] - sample_fatality['F' + str(case_index)] sample_fatality = sample_fatality.drop(['F' + str(case_index)], axis = 1) sample_case = sample_case.drop(['CC' + str(day_number)], axis = 1) sample_fatality = sample_fatality.drop(['F' + str(day_number)], axis = 1) CC_daily_dataset = CC_daily_dataset.append(sample_case) Fatality_daily_dataset = Fatality_daily_dataset.append(sample_fatality) <define_variables>
# Coarsen coordinates into grid cells and bucket time into 4 phases so that
# (x, y, time) can later serve as a discrete cell identifier.
# x is scaled onto 500 bins and y onto 1000 bins (assumes coordinates span
# roughly [0, 10) — TODO confirm against the raw data).
trainDF['x'] = np.floor(trainDF['x'].values * 500.0/10.0)
trainDF['y'] = np.floor(trainDF['y'].values * 1000.0/10.0)
# +180 shifts the phase, /360 makes fixed-width windows, % 4 folds them
# into 4 repeating buckets (presumably 6-hour blocks if time is in
# minutes — verify the unit).
trainDF['time'] = np.floor(((trainDF['time'].astype(float)+180.0)/360.0)% 4)
testDF['x'] = np.floor(testDF['x'].values * 500.0/10.0)
testDF['y'] = np.floor(testDF['y'].values * 1000.0/10.0)
testDF['time'] = np.floor(((testDF['time'].astype(float)+180.0)/360.0)% 4 )
Facebook V: Predicting Check Ins
66,629
last_day_CC = CC_dataset.iloc[:,-1].values last_day_F = Fatal_dataset.iloc[:,-1].values<filter>
# Concatenate the discretized x / y / time into one space-separated string
# key identifying a (grid cell, time bucket) combination, used for grouping
# in train and joining predictions onto test.
trainDF['identifier'] = trainDF['x'].astype(str)+ ' ' + trainDF['y'].astype(str)+ ' ' + trainDF['time'].astype(str)
testDF['identifier'] = testDF['x'].astype(str)+ ' ' + testDF['y'].astype(str)+ ' ' + testDF['time'].astype(str )
Facebook V: Predicting Check Ins
66,629
CC_daily_dataset[CC_daily_dataset['Province_State'] == 223]<drop_column>
def threeMostCommon(listt):
    """Return the three most frequent items of *listt*, space-separated."""
    top = Counter(listt).most_common(3)
    return ' '.join(str(item) for item, _count in top)
Facebook V: Predicting Check Ins
66,629
CC_daily_dataset = CC_daily_dataset.drop(['Id', 'Province_State'], axis = 1) Fatality_daily_dataset = Fatality_daily_dataset.drop(['Id', 'Province_State'], axis = 1 )<split>
# For every identifier cell, aggregate its place_ids into a string of the
# three most common ones; reset_index yields one row per cell for merging.
pivotedTrain = trainDF.pivot_table('place_id','identifier', aggfunc = {'place_id':threeMostCommon} ).reset_index()
Facebook V: Predicting Check Ins
66,629
features_CC = CC_daily_dataset.iloc[:, :-1*(future_day_number)] target_CC = CC_daily_dataset.iloc[:, -1*(future_day_number):] features_F = CC_daily_dataset.iloc[:, :-1*(future_day_number)] target_F = Fatality_daily_dataset.iloc[:, -1*(future_day_number):] train_features_CC, test_features_CC, train_target_CC, test_target_CC = train_test_split(features_CC, target_CC, test_size = 0.2) train_features_F, test_features_F, train_target_F, test_target_F = train_test_split(features_F, target_F, test_size = 0.2) <prepare_x_and_y>
# Attach each test row's top-3 predicted place_ids via its cell identifier.
out = pd.merge(testDF[['row_id', 'identifier']], pivotedTrain, how='left')
# FIX: DataFrame.sort() was deprecated in pandas 0.17 and removed in 0.20;
# sort_values() is the supported replacement.
out.sort_values('row_id', inplace=True)
# Cells never seen in training get a 0 placeholder prediction.
out.drop(['identifier'], axis=1).fillna(0).to_csv('output.csv', index=False)
Facebook V: Predicting Check Ins
4,373,850
<SOS> metric: RMSE Kaggle data source: galaxy-zoo-the-galaxy-challenge<data_type_conversions>
# Load the galaxy morphology label table and hold out 20% for validation.
# FIX: the original path '.. /input/...' contained a stray space and could
# not resolve; corrected to the standard Kaggle relative path.
df = pd.read_csv('../input/44352/training_solutions_rev1.csv')
df_train, df_test = train_test_split(df, test_size=.2)
df_train.shape, df_test.shape
Galaxy Zoo - The Galaxy Challenge
4,373,850
train["Date"] = train["Date"].apply(lambda x: datetime.strptime(x,'%Y-%m-%d')) test["Date"] = test["Date"].apply(lambda x: datetime.strptime(x,'%Y-%m-%d')) <feature_engineering>
%matplotlib inline
# Image geometry: originals are 424x424; we center-crop to 256x256 and
# downscale to 64x64 for the CNN input.
ORIG_SHAPE =(424,424)
CROP_SIZE =(256,256)
IMG_SHAPE =(64,64)
def get_image(path, x1,y1, shape, crop_size):
    """Read one image, center-crop it, resize to `shape`, scale values down."""
    x = plt.imread(path)
    x = x[x1:x1+crop_size[0], y1:y1+crop_size[1]]
    x = resize(x, shape)
    # NOTE(review): skimage's resize typically already returns floats in
    # [0, 1]; dividing by 255 again may double-scale — confirm against the
    # resize implementation imported elsewhere in this notebook.
    x = x/255.
    return x
def get_all_images(dataframe, shape=IMG_SHAPE, crop_size=CROP_SIZE):
    """Load every image referenced by `dataframe` (ID in column 0).

    Returns (x_batch, y_batch): the stacked image array and the label
    matrix taken from the remaining columns.
    """
    # Offsets that center the crop window inside the original image.
    x1 =(ORIG_SHAPE[0]-CROP_SIZE[0])//2
    y1 =(ORIG_SHAPE[1]-CROP_SIZE[1])//2
    sel = dataframe.values
    ids = sel[:,0].astype(int ).astype(str)
    y_batch = sel[:,1:]
    x_batch = []
    for i in tqdm(ids):
        x = get_image('.. /input/44352/images_training_rev1/'+i+'.jpg', x1,y1, shape=shape, crop_size=crop_size)
        x_batch.append(x)
    x_batch = np.array(x_batch)
    return x_batch, y_batch
X_train, y_train = get_all_images(df_train)
X_test, y_test = get_all_images(df_test )
Galaxy Zoo - The Galaxy Challenge
4,373,850
dataset = [train,test] for data in dataset: data['Year'] = data['Date'].dt.year data['Month'] = data['Date'].dt.month data['Day'] = data['Date'].dt.day data['Quarter'] = data['Date'].dt.quarter<data_type_conversions>
def root_mean_squared_error(y_true, y_pred):
    """RMSE metric in Keras backend ops (the competition's scoring metric)."""
    return K.sqrt(K.mean(K.square(y_pred - y_true)))
# Sequential CNN: three conv blocks with max pooling, then global max
# pooling feeding a stack of dropout-regularized dense layers.
model = Sequential()
model.add(Conv2D(512,(3, 3), input_shape=(IMG_SHAPE[0], IMG_SHAPE[1], 3)))
model.add(Conv2D(256,(3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256,(3, 3)))
model.add(Conv2D(128,(3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128,(3, 3)))
model.add(Conv2D(128,(3, 3)))
model.add(Activation('relu'))
model.add(GlobalMaxPooling2D())
model.add(Dropout(0.25))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.25))
# 37 sigmoid outputs: one probability per answer column of the label table.
model.add(Dense(37))
model.add(Activation('sigmoid'))
# Trained with binary cross-entropy; RMSE is tracked as the reported metric.
model.compile(loss='binary_crossentropy', optimizer='adamax', metrics=[root_mean_squared_error])
model.summary()
Galaxy Zoo - The Galaxy Challenge
4,373,850
train.insert(10,"TS",0) test.insert(8,"TS",0) train["TS"] = train["Date"].apply(lambda x: x.timestamp()) print(train['Date']) train["TS"] = train["TS"].astype(int) print(train['TS']) <data_type_conversions>
batch_size = 128
# FIX: `batch_size` was defined but never passed to fit(), so Keras
# silently trained with its default of 32; pass it explicitly so the
# configured value (also reused by the prediction loop below) takes effect.
model.fit(X_train, y_train, batch_size=batch_size, epochs=30,
          validation_data=(X_test, y_test))
Galaxy Zoo - The Galaxy Challenge
4,373,850
test["TS"] = test["Date"].apply(lambda x: x.timestamp()) print(type(test['TS'][1])) test["TS"] = test["TS"].astype(int) print(type(test['TS'][1]))<groupby>
def test_image_generator(ids, shape=IMG_SHAPE):
    """Load and preprocess a batch of test images named by `ids` (filenames)."""
    # Same centered crop offsets as used for the training images.
    x1 =(ORIG_SHAPE[0]-CROP_SIZE[0])//2
    y1 =(ORIG_SHAPE[1]-CROP_SIZE[1])//2
    x_batch = []
    for i in ids:
        x = get_image('.. /input/44352/images_test_rev1/'+i, x1, y1, shape=IMG_SHAPE, crop_size=CROP_SIZE)
        x_batch.append(x)
    x_batch = np.array(x_batch)
    return x_batch
val_files = os.listdir('.. /input/44352/images_test_rev1/')
val_predictions = []
N_val = len(val_files)
# Predict in batches to keep memory bounded; the last batch is clamped to
# the remaining files.
for i in tqdm(np.arange(0, N_val, batch_size)) :
    if i+batch_size > N_val:
        upper = N_val
    else:
        upper = i+batch_size
    X = test_image_generator(val_files[i:upper])
    y_pred = model.predict(X)
    val_predictions.append(y_pred)
val_predictions = np.array(val_predictions)
Y_pred = np.vstack(val_predictions)
# Strip the ".jpg" extension to recover the GalaxyID for each prediction.
ids = np.array([v.split('.')[0] for v in val_files] ).reshape(len(val_files),1)
submission_df = pd.DataFrame(np.hstack(( ids, Y_pred)) , columns=df.columns)
submission_df = submission_df.sort_values(by=['GalaxyID'])
submission_df.to_csv('sample_submission.csv', index=False )
Galaxy Zoo - The Galaxy Challenge
8,860,770
tt = train.groupby(['Country_Region'])['ConfirmedCases'].sum().sort_values(ascending=False) <import_modules>
# Load the COVID-19 diagnostic competition files: train carries the
# `covid_19` label, test is to be predicted, submission is the sample format.
train = pd.read_csv("/kaggle/input/covid-diagnostic/covid_19_train.csv")
test = pd.read_csv("/kaggle/input/covid-diagnostic/covid_19_test.csv")
submission = pd.read_csv("/kaggle/input/covid-diagnostic/covid_19_submission.csv" )
COVID-19 diagnostic
8,860,770
print(type(tt)) print(tt[:,0]) <sort_values>
# Pull the target labels out as a NumPy array (echoed for inspection).
y = train["covid_19"].to_numpy()
y
COVID-19 diagnostic
8,860,770
train.groupby(['Country_Region'])['Fatalities'].sum().sort_values(ascending=False )<sort_values>
# Stack train and test so the following preprocessing (NaN inspection,
# sparse-column drops, derived features) applies to both consistently.
df = pd.concat([train, test])
df.shape
COVID-19 diagnostic
8,860,770
train[train['Province_State'].isna() ==True].groupby(['Country_Region'] ).sum().sort_values(by='ConfirmedCases',ascending=False) <define_variables>
# Fraction of missing values per column, shown as a bare array for a quick look.
df.isna().mean(0 ).values
COVID-19 diagnostic
8,860,770
map_state = {'US':'United States', 'Congo(Kinshasa)':' Congo', 'Congo(Brazzaville)':'Congo', 'Timor Leste':'East Timor', 'Korea, South':'South Korea', 'Cote d'Ivoire':'Ivory Coast', 'Eswatini':'Swaziland', 'Holy See':'Vatican City', 'Jersey':'United Kingdom', 'Taiwan*':'Taiwan', 'occupied Palestinian territory':'Palestine' }<define_variables>
# Drop every column whose missing-value fraction exceeds 95%.
na_share = df.isna().mean(0)
df = df.drop(na_share.index[na_share > 0.95].values, axis='columns')
COVID-19 diagnostic
8,860,770
train.loc[train['Country_Region']=='US',['Country_Region']]='United States' <categorify>
# Median platelet count per age quantile (baseline for the age-adjusted
# feature built below).
df.groupby('age_quantile' ).agg({'Platelets': np.median} )
COVID-19 diagnostic
8,860,770
data = [train,test] for dataset in data: dataset.loc[dataset['Country_Region']=='US',['Country_Region']]='United States' dataset.loc[dataset['Country_Region']=='Congo(Kinshasa)',['Country_Region']]='Democratic Republic of the Congo' dataset.loc[dataset['Country_Region']=='Congo(Brazzaville)',['Country_Region']]='Democratic Republic of the Congo' dataset.loc[dataset['Country_Region']=='Timor-Leste',['Country_Region']]='East Timor' dataset.loc[dataset['Country_Region']=='Korea, South',['Country_Region']]='South Korea' dataset.loc[dataset['Country_Region']=='Eswatini',['Country_Region']]='Swaziland' dataset.loc[dataset['Country_Region']=='Holy See',['Country_Region']]='Vatican City' dataset.loc[dataset['Country_Region']=='Jersey',['Country_Region']]='United Kingdom' dataset.loc[dataset['Country_Region']=='Taiwan*',['Country_Region']]='Taiwan' dataset.loc[dataset['Country_Region']=='occupied Palestinian territory',['Country_Region']]='Palestine' dataset.loc[dataset['Country_Region']=='Czechia',['Country_Region']]='Czech Republic' dataset.loc[dataset['Country_Region']=='North Macedonia',['Country_Region']]='Macedonia' dataset.loc[dataset['Country_Region']=='Ireland',['Country_Region']]='Republic of Ireland' dataset.loc[dataset['Country_Region']=='Gambia',['Country_Region']]='The Gambia' dataset.loc[dataset['Country_Region']=='Netherlands',['Country_Region']]='Kingdom of the Netherlands' dataset.loc[dataset['Country_Region']=='Cabo Verde',['Country_Region']]='Cape Verde' dataset.loc[dataset['Country_Region']=='China',['Country_Region']]="People's Republic of China" dataset.loc[dataset['Country_Region']=='Brunei',['Country_Region']]='Brunei Darussalam' <define_variables>
# Age-adjusted platelets: each row's deviation from the median Platelets of
# its own age_quantile group (transform broadcasts the group median back).
df['Platelets_gr'] = df['Platelets'] - df.groupby('age_quantile')['Platelets'].transform(np.median )
COVID-19 diagnostic
8,860,770
countries = [ {'timezones': ['Europe/Andorra'], 'code': 'AD', 'continent': 'Europe', 'name': 'Andorra', 'capital': 'Andorra la Vella'}, {'timezones': ['Asia/Kabul'], 'code': 'AF', 'continent': 'Asia', 'name': 'Afghanistan', 'capital': 'Kabul'}, {'timezones': ['America/Antigua'], 'code': 'AG', 'continent': 'North America', 'name': 'Antigua and Barbuda', 'capital': "St.John's"}, {'timezones': ['Europe/Tirane'], 'code': 'AL', 'continent': 'Europe', 'name': 'Albania', 'capital': 'Tirana'}, {'timezones': ['Asia/Yerevan'], 'code': 'AM', 'continent': 'Asia', 'name': 'Armenia', 'capital': 'Yerevan'}, {'timezones': ['Africa/Luanda'], 'code': 'AO', 'continent': 'Africa', 'name': 'Angola', 'capital': 'Luanda'}, {'timezones': ['America/Argentina/Buenos_Aires', 'America/Argentina/Cordoba', 'America/Argentina/Jujuy', 'America/Argentina/Tucuman', 'America/Argentina/Catamarca', 'America/Argentina/La_Rioja', 'America/Argentina/San_Juan', 'America/Argentina/Mendoza', 'America/Argentina/Rio_Gallegos', 'America/Argentina/Ushuaia'], 'code': 'AR', 'continent': 'South America', 'name': 'Argentina', 'capital': 'Buenos Aires'}, {'timezones': ['Europe/Vienna'], 'code': 'AT', 'continent': 'Europe', 'name': 'Austria', 'capital': 'Vienna'}, {'timezones': ['Australia/Lord_Howe', 'Australia/Hobart', 'Australia/Currie', 'Australia/Melbourne', 'Australia/Sydney', 'Australia/Broken_Hill', 'Australia/Brisbane', 'Australia/Lindeman', 'Australia/Adelaide', 'Australia/Darwin', 'Australia/Perth'], 'code': 'AU', 'continent': 'Oceania', 'name': 'Australia', 'capital': 'Canberra'}, {'timezones': ['Asia/Baku'], 'code': 'AZ', 'continent': 'Asia', 'name': 'Azerbaijan', 'capital': 'Baku'}, {'timezones': ['America/Barbados'], 'code': 'BB', 'continent': 'North America', 'name': 'Barbados', 'capital': 'Bridgetown'}, {'timezones': ['Asia/Dhaka'], 'code': 'BD', 'continent': 'Asia', 'name': 'Bangladesh', 'capital': 'Dhaka'}, {'timezones': ['Europe/Brussels'], 'code': 'BE', 'continent': 'Europe', 'name': 'Belgium', 
'capital': 'Brussels'}, {'timezones': ['Africa/Ouagadougou'], 'code': 'BF', 'continent': 'Africa', 'name': 'Burkina Faso', 'capital': 'Ouagadougou'}, {'timezones': ['Europe/Sofia'], 'code': 'BG', 'continent': 'Europe', 'name': 'Bulgaria', 'capital': 'Sofia'}, {'timezones': ['Asia/Bahrain'], 'code': 'BH', 'continent': 'Asia', 'name': 'Bahrain', 'capital': 'Manama'}, {'timezones': ['Africa/Bujumbura'], 'code': 'BI', 'continent': 'Africa', 'name': 'Burundi', 'capital': 'Bujumbura'}, {'timezones': ['Africa/Porto-Novo'], 'code': 'BJ', 'continent': 'Africa', 'name': 'Benin', 'capital': 'Porto-Novo'}, {'timezones': ['Asia/Brunei'], 'code': 'BN', 'continent': 'Asia', 'name': 'Brunei Darussalam', 'capital': 'Bandar Seri Begawan'}, {'timezones': ['America/La_Paz'], 'code': 'BO', 'continent': 'South America', 'name': 'Bolivia', 'capital': 'Sucre'}, {'timezones': ['America/Noronha', 'America/Belem', 'America/Fortaleza', 'America/Recife', 'America/Araguaina', 'America/Maceio', 'America/Bahia', 'America/Sao_Paulo', 'America/Campo_Grande', 'America/Cuiaba', 'America/Porto_Velho', 'America/Boa_Vista', 'America/Manaus', 'America/Eirunepe', 'America/Rio_Branco'], 'code': 'BR', 'continent': 'South America', 'name': 'Brazil', 'capital': 'Bras\xc3\xadlia'}, {'timezones': ['America/Nassau'], 'code': 'BS', 'continent': 'North America', 'name': 'Bahamas', 'capital': 'Nassau'}, {'timezones': ['Asia/Thimphu'], 'code': 'BT', 'continent': 'Asia', 'name': 'Bhutan', 'capital': 'Thimphu'}, {'timezones': ['Africa/Gaborone'], 'code': 'BW', 'continent': 'Africa', 'name': 'Botswana', 'capital': 'Gaborone'}, {'timezones': ['Europe/Minsk'], 'code': 'BY', 'continent': 'Europe', 'name': 'Belarus', 'capital': 'Minsk'}, {'timezones': ['America/Belize'], 'code': 'BZ', 'continent': 'North America', 'name': 'Belize', 'capital': 'Belmopan'}, {'timezones': ['America/St_Johns', 'America/Halifax', 'America/Glace_Bay', 'America/Moncton', 'America/Goose_Bay', 'America/Blanc-Sablon', 'America/Montreal', 
'America/Toronto', 'America/Nipigon', 'America/Thunder_Bay', 'America/Pangnirtung', 'America/Iqaluit', 'America/Atikokan', 'America/Rankin_Inlet', 'America/Winnipeg', 'America/Rainy_River', 'America/Cambridge_Bay', 'America/Regina', 'America/Swift_Current', 'America/Edmonton', 'America/Yellowknife', 'America/Inuvik', 'America/Dawson_Creek', 'America/Vancouver', 'America/Whitehorse', 'America/Dawson'], 'code': 'CA', 'continent': 'North America', 'name': 'Canada', 'capital': 'Ottawa'}, {'timezones': ['Africa/Kinshasa', 'Africa/Lubumbashi'], 'code': 'CD', 'continent': 'Africa', 'name': 'Democratic Republic of the Congo', 'capital': 'Kinshasa'}, {'timezones': ['Africa/Brazzaville'], 'code': 'CG', 'continent': 'Africa', 'name': 'Republic of the Congo', 'capital': 'Brazzaville'}, {'timezones': ['Africa/Abidjan'], 'code': 'CI', 'continent': 'Africa', 'name': "Cote d'Ivoire", 'capital': 'Yamoussoukro'}, {'timezones': ['America/Santiago', 'Pacific/Easter'], 'code': 'CL', 'continent': 'South America', 'name': 'Chile', 'capital': 'Santiago'}, {'timezones': ['Africa/Douala'], 'code': 'CM', 'continent': 'Africa', 'name': 'Cameroon', 'capital': 'Yaound\xc3\xa9'}, {'timezones': ['Asia/Shanghai', 'Asia/Harbin', 'Asia/Chongqing', 'Asia/Urumqi', 'Asia/Kashgar'], 'code': 'CN', 'continent': 'Asia', 'name': "People's Republic of China", 'capital': 'Beijing'}, {'timezones': ['America/Bogota'], 'code': 'CO', 'continent': 'South America', 'name': 'Colombia', 'capital': 'Bogot\xc3\xa1'}, {'timezones': ['America/Costa_Rica'], 'code': 'CR', 'continent': 'North America', 'name': 'Costa Rica', 'capital': 'San Jos\xc3\xa9'}, {'timezones': ['America/Havana'], 'code': 'CU', 'continent': 'North America', 'name': 'Cuba', 'capital': 'Havana'}, {'timezones': ['Atlantic/Cape_Verde'], 'code': 'CV', 'continent': 'Africa', 'name': 'Cape Verde', 'capital': 'Praia'}, {'timezones': ['Asia/Nicosia'], 'code': 'CY', 'continent': 'Asia', 'name': 'Cyprus', 'capital': 'Nicosia'}, {'timezones': ['Europe/Prague'], 
'code': 'CZ', 'continent': 'Europe', 'name': 'Czech Republic', 'capital': 'Prague'}, {'timezones': ['Europe/Berlin'], 'code': 'DE', 'continent': 'Europe', 'name': 'Germany', 'capital': 'Berlin'}, {'timezones': ['Africa/Djibouti'], 'code': 'DJ', 'continent': 'Africa', 'name': 'Djibouti', 'capital': 'Djibouti City'}, {'timezones': ['Europe/Copenhagen'], 'code': 'DK', 'continent': 'Europe', 'name': 'Denmark', 'capital': 'Copenhagen'}, {'timezones': ['America/Dominica'], 'code': 'DM', 'continent': 'North America', 'name': 'Dominica', 'capital': 'Roseau'}, {'timezones': ['America/Santo_Domingo'], 'code': 'DO', 'continent': 'North America', 'name': 'Dominican Republic', 'capital': 'Santo Domingo'}, {'timezones': ['America/Guayaquil', 'Pacific/Galapagos'], 'code': 'EC', 'continent': 'South America', 'name': 'Ecuador', 'capital': 'Quito'}, {'timezones': ['Europe/Tallinn'], 'code': 'EE', 'continent': 'Europe', 'name': 'Estonia', 'capital': 'Tallinn'}, {'timezones': ['Africa/Cairo'], 'code': 'EG', 'continent': 'Africa', 'name': 'Egypt', 'capital': 'Cairo'}, {'timezones': ['Africa/Asmera'], 'code': 'ER', 'continent': 'Africa', 'name': 'Eritrea', 'capital': 'Asmara'}, {'timezones': ['Africa/Addis_Ababa'], 'code': 'ET', 'continent': 'Africa', 'name': 'Ethiopia', 'capital': 'Addis Ababa'}, {'timezones': ['Europe/Helsinki'], 'code': 'FI', 'continent': 'Europe', 'name': 'Finland', 'capital': 'Helsinki'}, {'timezones': ['Pacific/Fiji'], 'code': 'FJ', 'continent': 'Oceania', 'name': 'Fiji', 'capital': 'Suva'}, {'timezones': ['Europe/Paris'], 'code': 'FR', 'continent': 'Europe', 'name': 'France', 'capital': 'Paris'}, {'timezones': ['Africa/Libreville'], 'code': 'GA', 'continent': 'Africa', 'name': 'Gabon', 'capital': 'Libreville'}, {'timezones': ['Asia/Tbilisi'], 'code': 'GE', 'continent': 'Asia', 'name': 'Georgia', 'capital': 'Tbilisi'}, {'timezones': ['Africa/Accra'], 'code': 'GH', 'continent': 'Africa', 'name': 'Ghana', 'capital': 'Accra'}, {'timezones': ['Africa/Banjul'], 'code': 
'GM', 'continent': 'Africa', 'name': 'The Gambia', 'capital': 'Banjul'}, {'timezones': ['Africa/Conakry'], 'code': 'GN', 'continent': 'Africa', 'name': 'Guinea', 'capital': 'Conakry'}, {'timezones': ['Europe/Athens'], 'code': 'GR', 'continent': 'Europe', 'name': 'Greece', 'capital': 'Athens'}, {'timezones': ['America/Guatemala'], 'code': 'GT', 'continent': 'North America', 'name': 'Guatemala', 'capital': 'Guatemala City'}, {'timezones': ['America/Guatemala'], 'code': 'HT', 'continent': 'North America', 'name': 'Haiti', 'capital': 'Port-au-Prince'}, {'timezones': ['Africa/Bissau'], 'code': 'GW', 'continent': 'Africa', 'name': 'Guinea-Bissau', 'capital': 'Bissau'}, {'timezones': ['America/Guyana'], 'code': 'GY', 'continent': 'South America', 'name': 'Guyana', 'capital': 'Georgetown'}, {'timezones': ['America/Tegucigalpa'], 'code': 'HN', 'continent': 'North America', 'name': 'Honduras', 'capital': 'Tegucigalpa'}, {'timezones': ['Europe/Budapest'], 'code': 'HU', 'continent': 'Europe', 'name': 'Hungary', 'capital': 'Budapest'}, {'timezones': ['Asia/Jakarta', 'Asia/Pontianak', 'Asia/Makassar', 'Asia/Jayapura'], 'code': 'ID', 'continent': 'Asia', 'name': 'Indonesia', 'capital': 'Jakarta'}, {'timezones': ['Europe/Dublin'], 'code': 'IE', 'continent': 'Europe', 'name': 'Republic of Ireland', 'capital': 'Dublin'}, {'timezones': ['Asia/Jerusalem'], 'code': 'IL', 'continent': 'Asia', 'name': 'Israel', 'capital': 'Jerusalem'}, {'timezones': ['Asia/Calcutta'], 'code': 'IN', 'continent': 'Asia', 'name': 'India', 'capital': 'New Delhi'}, {'timezones': ['Asia/Baghdad'], 'code': 'IQ', 'continent': 'Asia', 'name': 'Iraq', 'capital': 'Baghdad'}, {'timezones': ['Asia/Tehran'], 'code': 'IR', 'continent': 'Asia', 'name': 'Iran', 'capital': 'Tehran'}, {'timezones': ['Atlantic/Reykjavik'], 'code': 'IS', 'continent': 'Europe', 'name': 'Iceland', 'capital': 'Reykjav\xc3\xadk'}, {'timezones': ['Europe/Rome'], 'code': 'IT', 'continent': 'Europe', 'name': 'Italy', 'capital': 'Rome'}, 
{'timezones': ['America/Jamaica'], 'code': 'JM', 'continent': 'North America', 'name': 'Jamaica', 'capital': 'Kingston'}, {'timezones': ['Asia/Amman'], 'code': 'JO', 'continent': 'Asia', 'name': 'Jordan', 'capital': 'Amman'}, {'timezones': ['Asia/Tokyo'], 'code': 'JP', 'continent': 'Asia', 'name': 'Japan', 'capital': 'Tokyo'}, {'timezones': ['Africa/Nairobi'], 'code': 'KE', 'continent': 'Africa', 'name': 'Kenya', 'capital': 'Nairobi'}, {'timezones': ['Asia/Bishkek'], 'code': 'KG', 'continent': 'Asia', 'name': 'Kyrgyzstan', 'capital': 'Bishkek'}, {'timezones': ['Pacific/Tarawa', 'Pacific/Enderbury', 'Pacific/Kiritimati'], 'code': 'KI', 'continent': 'Oceania', 'name': 'Kiribati', 'capital': 'Tarawa'}, {'timezones': ['Asia/Pyongyang'], 'code': 'KP', 'continent': 'Asia', 'name': 'North Korea', 'capital': 'Pyongyang'}, {'timezones': ['Asia/Seoul'], 'code': 'KR', 'continent': 'Asia', 'name': 'South Korea', 'capital': 'Seoul'}, {'timezones': ['Asia/Kuwait'], 'code': 'KW', 'continent': 'Asia', 'name': 'Kuwait', 'capital': 'Kuwait City'}, {'timezones': ['Asia/Beirut'], 'code': 'LB', 'continent': 'Asia', 'name': 'Lebanon', 'capital': 'Beirut'}, {'timezones': ['Europe/Vaduz'], 'code': 'LI', 'continent': 'Europe', 'name': 'Liechtenstein', 'capital': 'Vaduz'}, {'timezones': ['Africa/Monrovia'], 'code': 'LR', 'continent': 'Africa', 'name': 'Liberia', 'capital': 'Monrovia'}, {'timezones': ['Africa/Maseru'], 'code': 'LS', 'continent': 'Africa', 'name': 'Lesotho', 'capital': 'Maseru'}, {'timezones': ['Europe/Vilnius'], 'code': 'LT', 'continent': 'Europe', 'name': 'Lithuania', 'capital': 'Vilnius'}, {'timezones': ['Europe/Luxembourg'], 'code': 'LU', 'continent': 'Europe', 'name': 'Luxembourg', 'capital': 'Luxembourg City'}, {'timezones': ['Europe/Riga'], 'code': 'LV', 'continent': 'Europe', 'name': 'Latvia', 'capital': 'Riga'}, {'timezones': ['Africa/Tripoli'], 'code': 'LY', 'continent': 'Africa', 'name': 'Libya', 'capital': 'Tripoli'}, {'timezones': ['Indian/Antananarivo'], 'code': 
'MG', 'continent': 'Africa', 'name': 'Madagascar', 'capital': 'Antananarivo'}, {'timezones': ['Pacific/Majuro', 'Pacific/Kwajalein'], 'code': 'MH', 'continent': 'Oceania', 'name': 'Marshall Islands', 'capital': 'Majuro'}, {'timezones': ['Europe/Skopje'], 'code': 'MK', 'continent': 'Europe', 'name': 'Macedonia', 'capital': 'Skopje'}, {'timezones': ['Africa/Bamako'], 'code': 'ML', 'continent': 'Africa', 'name': 'Mali', 'capital': 'Bamako'}, {'timezones': ['Asia/Rangoon'], 'code': 'MM', 'continent': 'Asia', 'name': 'Myanmar', 'capital': 'Naypyidaw'}, {'timezones': ['Asia/Ulaanbaatar', 'Asia/Hovd', 'Asia/Choibalsan'], 'code': 'MN', 'continent': 'Asia', 'name': 'Mongolia', 'capital': 'Ulaanbaatar'}, {'timezones': ['Africa/Nouakchott'], 'code': 'MR', 'continent': 'Africa', 'name': 'Mauritania', 'capital': 'Nouakchott'}, {'timezones': ['Europe/Malta'], 'code': 'MT', 'continent': 'Europe', 'name': 'Malta', 'capital': 'Valletta'}, {'timezones': ['Indian/Mauritius'], 'code': 'MU', 'continent': 'Africa', 'name': 'Mauritius', 'capital': 'Port Louis'}, {'timezones': ['Indian/Maldives'], 'code': 'MV', 'continent': 'Asia', 'name': 'Maldives', 'capital': 'Mal\xc3\xa9'}, {'timezones': ['Africa/Blantyre'], 'code': 'MW', 'continent': 'Africa', 'name': 'Malawi', 'capital': 'Lilongwe'}, {'timezones': ['America/Mexico_City', 'America/Cancun', 'America/Merida', 'America/Monterrey', 'America/Mazatlan', 'America/Chihuahua', 'America/Hermosillo', 'America/Tijuana'], 'code': 'MX', 'continent': 'North America', 'name': 'Mexico', 'capital': 'Mexico City'}, {'timezones': ['Asia/Kuala_Lumpur', 'Asia/Kuching'], 'code': 'MY', 'continent': 'Asia', 'name': 'Malaysia', 'capital': 'Kuala Lumpur'}, {'timezones': ['Africa/Maputo'], 'code': 'MZ', 'continent': 'Africa', 'name': 'Mozambique', 'capital': 'Maputo'}, {'timezones': ['Africa/Windhoek'], 'code': 'NA', 'continent': 'Africa', 'name': 'Namibia', 'capital': 'Windhoek'}, {'timezones': ['Africa/Niamey'], 'code': 'NE', 'continent': 'Africa', 'name': 
'Niger', 'capital': 'Niamey'}, {'timezones': ['Africa/Lagos'], 'code': 'NG', 'continent': 'Africa', 'name': 'Nigeria', 'capital': 'Abuja'}, {'timezones': ['America/Managua'], 'code': 'NI', 'continent': 'North America', 'name': 'Nicaragua', 'capital': 'Managua'}, {'timezones': ['Europe/Amsterdam'], 'code': 'NL', 'continent': 'Europe', 'name': 'Kingdom of the Netherlands', 'capital': 'Amsterdam'}, {'timezones': ['Europe/Oslo'], 'code': 'NO', 'continent': 'Europe', 'name': 'Norway', 'capital': 'Oslo'}, {'timezones': ['Asia/Katmandu'], 'code': 'NP', 'continent': 'Asia', 'name': 'Nepal', 'capital': 'Kathmandu'}, {'timezones': ['Pacific/Nauru'], 'code': 'NR', 'continent': 'Oceania', 'name': 'Nauru', 'capital': 'Yaren'}, {'timezones': ['Pacific/Auckland', 'Pacific/Chatham'], 'code': 'NZ', 'continent': 'Oceania', 'name': 'New Zealand', 'capital': 'Wellington'}, {'timezones': ['Asia/Muscat'], 'code': 'OM', 'continent': 'Asia', 'name': 'Oman', 'capital': 'Muscat'}, {'timezones': ['America/Panama'], 'code': 'PA', 'continent': 'North America', 'name': 'Panama', 'capital': 'Panama City'}, {'timezones': ['America/Lima'], 'code': 'PE', 'continent': 'South America', 'name': 'Peru', 'capital': 'Lima'}, {'timezones': ['Pacific/Port_Moresby'], 'code': 'PG', 'continent': 'Oceania', 'name': 'Papua New Guinea', 'capital': 'Port Moresby'}, {'timezones': ['Asia/Manila'], 'code': 'PH', 'continent': 'Asia', 'name': 'Philippines', 'capital': 'Manila'}, {'timezones': ['Asia/Karachi'], 'code': 'PK', 'continent': 'Asia', 'name': 'Pakistan', 'capital': 'Islamabad'}, {'timezones': ['Europe/Warsaw'], 'code': 'PL', 'continent': 'Europe', 'name': 'Poland', 'capital': 'Warsaw'}, {'timezones': ['Europe/Lisbon', 'Atlantic/Madeira', 'Atlantic/Azores'], 'code': 'PT', 'continent': 'Europe', 'name': 'Portugal', 'capital': 'Lisbon'}, {'timezones': ['Pacific/Palau'], 'code': 'PW', 'continent': 'Oceania', 'name': 'Palau', 'capital': 'Ngerulmud'}, {'timezones': ['America/Asuncion'], 'code': 'PY', 'continent': 
'South America', 'name': 'Paraguay', 'capital': 'Asunci\xc3\xb3n'}, {'timezones': ['Asia/Qatar'], 'code': 'QA', 'continent': 'Asia', 'name': 'Qatar', 'capital': 'Doha'}, {'timezones': ['Europe/Bucharest'], 'code': 'RO', 'continent': 'Europe', 'name': 'Romania', 'capital': 'Bucharest'}, {'timezones': ['Europe/Kaliningrad', 'Europe/Moscow', 'Europe/Volgograd', 'Europe/Samara', 'Asia/Yekaterinburg', 'Asia/Omsk', 'Asia/Novosibirsk', 'Asia/Krasnoyarsk', 'Asia/Irkutsk', 'Asia/Yakutsk', 'Asia/Vladivostok', 'Asia/Sakhalin', 'Asia/Magadan', 'Asia/Kamchatka', 'Asia/Anadyr'], 'code': 'RU', 'continent': 'Europe', 'name': 'Russia', 'capital': 'Moscow'}, {'timezones': ['Africa/Kigali'], 'code': 'RW', 'continent': 'Africa', 'name': 'Rwanda', 'capital': 'Kigali'}, {'timezones': ['Asia/Riyadh'], 'code': 'SA', 'continent': 'Asia', 'name': 'Saudi Arabia', 'capital': 'Riyadh'}, {'timezones': ['Pacific/Guadalcanal'], 'code': 'SB', 'continent': 'Oceania', 'name': 'Solomon Islands', 'capital': 'Honiara'}, {'timezones': ['Indian/Mahe'], 'code': 'SC', 'continent': 'Africa', 'name': 'Seychelles', 'capital': 'Victoria'}, {'timezones': ['Africa/Khartoum'], 'code': 'SD', 'continent': 'Africa', 'name': 'Sudan', 'capital': 'Khartoum'}, {'timezones': ['Europe/Stockholm'], 'code': 'SE', 'continent': 'Europe', 'name': 'Sweden', 'capital': 'Stockholm'}, {'timezones': ['Asia/Singapore'], 'code': 'SG', 'continent': 'Asia', 'name': 'Singapore', 'capital': 'Singapore'}, {'timezones': ['Europe/Ljubljana'], 'code': 'SI', 'continent': 'Europe', 'name': 'Slovenia', 'capital': 'Ljubljana'}, {'timezones': ['Europe/Bratislava'], 'code': 'SK', 'continent': 'Europe', 'name': 'Slovakia', 'capital': 'Bratislava'}, {'timezones': ['Africa/Freetown'], 'code': 'SL', 'continent': 'Africa', 'name': 'Sierra Leone', 'capital': 'Freetown'}, {'timezones': ['Europe/San_Marino'], 'code': 'SM', 'continent': 'Europe', 'name': 'San Marino', 'capital': 'San Marino'}, {'timezones': ['Africa/Dakar'], 'code': 'SN', 'continent': 
'Africa', 'name': 'Senegal', 'capital': 'Dakar'}, {'timezones': ['Africa/Mogadishu'], 'code': 'SO', 'continent': 'Africa', 'name': 'Somalia', 'capital': 'Mogadishu'}, {'timezones': ['America/Paramaribo'], 'code': 'SR', 'continent': 'South America', 'name': 'Suriname', 'capital': 'Paramaribo'}, {'timezones': ['Africa/Sao_Tome'], 'code': 'ST', 'continent': 'Africa', 'name': 'S\xc3\xa3o Tom\xc3\xa9 and Pr\xc3\xadncipe', 'capital': 'S\xc3\xa3o Tom\xc3\xa9'}, {'timezones': ['Asia/Damascus'], 'code': 'SY', 'continent': 'Asia', 'name': 'Syria', 'capital': 'Damascus'}, {'timezones': ['Africa/Lome'], 'code': 'TG', 'continent': 'Africa', 'name': 'Togo', 'capital': 'Lom\xc3\xa9'}, {'timezones': ['Asia/Bangkok'], 'code': 'TH', 'continent': 'Asia', 'name': 'Thailand', 'capital': 'Bangkok'}, {'timezones': ['Asia/Dushanbe'], 'code': 'TJ', 'continent': 'Asia', 'name': 'Tajikistan', 'capital': 'Dushanbe'}, {'timezones': ['Asia/Ashgabat'], 'code': 'TM', 'continent': 'Asia', 'name': 'Turkmenistan', 'capital': 'Ashgabat'}, {'timezones': ['Africa/Tunis'], 'code': 'TN', 'continent': 'Africa', 'name': 'Tunisia', 'capital': 'Tunis'}, {'timezones': ['Pacific/Tongatapu'], 'code': 'TO', 'continent': 'Oceania', 'name': 'Tonga', 'capital': 'Nuku\xca\xbbalofa'}, {'timezones': ['Europe/Istanbul'], 'code': 'TR', 'continent': 'Asia', 'name': 'Turkey', 'capital': 'Ankara'}, {'timezones': ['America/Port_of_Spain'], 'code': 'TT', 'continent': 'North America', 'name': 'Trinidad and Tobago', 'capital': 'Port of Spain'}, {'timezones': ['Pacific/Funafuti'], 'code': 'TV', 'continent': 'Oceania', 'name': 'Tuvalu', 'capital': 'Funafuti'}, {'timezones': ['Africa/Dar_es_Salaam'], 'code': 'TZ', 'continent': 'Africa', 'name': 'Tanzania', 'capital': 'Dodoma'}, {'timezones': ['Europe/Kiev', 'Europe/Uzhgorod', 'Europe/Zaporozhye', 'Europe/Simferopol'], 'code': 'UA', 'continent': 'Europe', 'name': 'Ukraine', 'capital': 'Kiev'}, {'timezones': ['Africa/Kampala'], 'code': 'UG', 'continent': 'Africa', 'name': 'Uganda', 
'capital': 'Kampala'}, {'timezones': ['America/New_York', 'America/Detroit', 'America/Kentucky/Louisville', 'America/Kentucky/Monticello', 'America/Indiana/Indianapolis', 'America/Indiana/Marengo', 'America/Indiana/Knox', 'America/Indiana/Vevay', 'America/Chicago', 'America/Indiana/Vincennes', 'America/Indiana/Petersburg', 'America/Menominee', 'America/North_Dakota/Center', 'America/North_Dakota/New_Salem', 'America/Denver', 'America/Boise', 'America/Shiprock', 'America/Phoenix', 'America/Los_Angeles', 'America/Anchorage', 'America/Juneau', 'America/Yakutat', 'America/Nome', 'America/Adak', 'Pacific/Honolulu'], 'code': 'US', 'continent': 'North America', 'name': 'United States', 'capital': 'Washington, D.C.'}, {'timezones': ['America/Montevideo'], 'code': 'UY', 'continent': 'South America', 'name': 'Uruguay', 'capital': 'Montevideo'}, {'timezones': ['Asia/Samarkand', 'Asia/Tashkent'], 'code': 'UZ', 'continent': 'Asia', 'name': 'Uzbekistan', 'capital': 'Tashkent'}, {'timezones': ['Europe/Vatican'], 'code': 'VA', 'continent': 'Europe', 'name': 'Vatican City', 'capital': 'Vatican City'}, {'timezones': ['America/Caracas'], 'code': 'VE', 'continent': 'South America', 'name': 'Venezuela', 'capital': 'Caracas'}, {'timezones': ['Asia/Saigon'], 'code': 'VN', 'continent': 'Asia', 'name': 'Vietnam', 'capital': 'Hanoi'}, {'timezones': ['Pacific/Efate'], 'code': 'VU', 'continent': 'Oceania', 'name': 'Vanuatu', 'capital': 'Port Vila'}, {'timezones': ['Asia/Aden'], 'code': 'YE', 'continent': 'Asia', 'name': 'Yemen', 'capital': "Sana'a"}, {'timezones': ['Africa/Lusaka'], 'code': 'ZM', 'continent': 'Africa', 'name': 'Zambia', 'capital': 'Lusaka'}, {'timezones': ['Africa/Harare'], 'code': 'ZW', 'continent': 'Africa', 'name': 'Zimbabwe', 'capital': 'Harare'}, {'timezones': ['Africa/Algiers'], 'code': 'DZ', 'continent': 'Africa', 'name': 'Algeria', 'capital': 'Algiers'}, {'timezones': ['Europe/Sarajevo'], 'code': 'BA', 'continent': 'Europe', 'name': 'Bosnia and Herzegovina', 
'capital': 'Sarajevo'}, {'timezones': ['Asia/Phnom_Penh'], 'code': 'KH', 'continent': 'Asia', 'name': 'Cambodia', 'capital': 'Phnom Penh'}, {'timezones': ['Africa/Bangui'], 'code': 'CF', 'continent': 'Africa', 'name': 'Central African Republic', 'capital': 'Bangui'}, {'timezones': ['Africa/Ndjamena'], 'code': 'TD', 'continent': 'Africa', 'name': 'Chad', 'capital': "N'Djamena"}, {'timezones': ['Indian/Comoro'], 'code': 'KM', 'continent': 'Africa', 'name': 'Comoros', 'capital': 'Moroni'}, {'timezones': ['Europe/Zagreb'], 'code': 'HR', 'continent': 'Europe', 'name': 'Croatia', 'capital': 'Zagreb'}, {'timezones': ['Asia/Dili'], 'code': 'TL', 'continent': 'Asia', 'name': 'East Timor', 'capital': 'Dili'}, {'timezones': ['America/El_Salvador'], 'code': 'SV', 'continent': 'North America', 'name': 'El Salvador', 'capital': 'San Salvador'}, {'timezones': ['Africa/Malabo'], 'code': 'GQ', 'continent': 'Africa', 'name': 'Equatorial Guinea', 'capital': 'Malabo'}, {'timezones': ['America/Grenada'], 'code': 'GD', 'continent': 'North America', 'name': 'Grenada', 'capital': "St.George's"}, {'timezones': ['Asia/Almaty', 'Asia/Qyzylorda', 'Asia/Aqtobe', 'Asia/Aqtau', 'Asia/Oral'], 'code': 'KZ', 'continent': 'Asia', 'name': 'Kazakhstan', 'capital': 'Astana'}, {'timezones': ['Asia/Vientiane'], 'code': 'LA', 'continent': 'Asia', 'name': 'Laos', 'capital': 'Vientiane'}, {'timezones': ['Pacific/Truk', 'Pacific/Ponape', 'Pacific/Kosrae'], 'code': 'FM', 'continent': 'Oceania', 'name': 'Federated States of Micronesia', 'capital': 'Palikir'}, {'timezones': ['Europe/Chisinau'], 'code': 'MD', 'continent': 'Europe', 'name': 'Moldova', 'capital': 'Chi\xc5\x9fin\xc4\x83u'}, {'timezones': ['Europe/Monaco'], 'code': 'MC', 'continent': 'Europe', 'name': 'Monaco', 'capital': 'Monaco'}, {'timezones': ['Europe/Podgorica'], 'code': 'ME', 'continent': 'Europe', 'name': 'Montenegro', 'capital': 'Podgorica'}, {'timezones': ['Africa/Casablanca'], 'code': 'MA', 'continent': 'Africa', 'name': 'Morocco', 
'capital': 'Rabat'}, {'timezones': ['America/St_Kitts'], 'code': 'KN', 'continent': 'North America', 'name': 'Saint Kitts and Nevis', 'capital': 'Basseterre'}, {'timezones': ['America/St_Lucia'], 'code': 'LC', 'continent': 'North America', 'name': 'Saint Lucia', 'capital': 'Castries'}, {'timezones': ['America/St_Vincent'], 'code': 'VC', 'continent': 'North America', 'name': 'Saint Vincent and the Grenadines', 'capital': 'Kingstown'}, {'timezones': ['Pacific/Apia'], 'code': 'WS', 'continent': 'Oceania', 'name': 'Samoa', 'capital': 'Apia'}, {'timezones': ['Europe/Belgrade'], 'code': 'RS', 'continent': 'Europe', 'name': 'Serbia', 'capital': 'Belgrade'}, {'timezones': ['Africa/Johannesburg'], 'code': 'ZA', 'continent': 'Africa', 'name': 'South Africa', 'capital': 'Pretoria'}, {'timezones': ['Europe/Madrid', 'Africa/Ceuta', 'Atlantic/Canary'], 'code': 'ES', 'continent': 'Europe', 'name': 'Spain', 'capital': 'Madrid'}, {'timezones': ['Asia/Colombo'], 'code': 'LK', 'continent': 'Asia', 'name': 'Sri Lanka', 'capital': 'Sri Jayewardenepura Kotte'}, {'timezones': ['Africa/Mbabane'], 'code': 'SZ', 'continent': 'Africa', 'name': 'Swaziland', 'capital': 'Mbabane'}, {'timezones': ['Europe/Zurich'], 'code': 'CH', 'continent': 'Europe', 'name': 'Switzerland', 'capital': 'Bern'}, {'timezones': ['Asia/Dubai'], 'code': 'AE', 'continent': 'Asia', 'name': 'United Arab Emirates', 'capital': 'Abu Dhabi'}, {'timezones': ['Europe/London'], 'code': 'GB', 'continent': 'Europe', 'name': 'United Kingdom', 'capital': 'London'}, {'timezones': ['Asia/Taipei'], 'code': 'TW', 'continent': 'Asia', 'name': 'Taiwan', 'capital': 'Taipei'}, {'timezones': ['Asia/Hong_Kong'], 'code': 'HK', 'continent': 'Asia', 'name': 'Hong Kong', 'capital': 'Hong Kong'}, {'timezones': ['CET'], 'code': 'XK', 'continent': 'Europe', 'name': 'Kosovo', 'capital': 'Pristina'}, {'timezones': ['America/Puerto_Rico'], 'code': 'PR', 'continent': 'America', 'name': 'Puerto Rico', 'capital': 'San Juan'}, ]<feature_engineering>
# Median monocyte level per age quantile (named-aggregation form).
df.groupby('age_quantile').agg(Monocytes=('Monocytes', np.median))
COVID-19 diagnostic
8,860,770
def def_value():
    """Fallback continent label for country names missing from the lookup."""
    return 'apple'

# Country name -> continent lookup; unknown names fall back to def_value().
conti = defaultdict(def_value)
conti.update({entry['name']: entry['continent'] for entry in countries})
# All country names known to the lookup table.
tu = {entry['name'] for entry in countries}
# Express each measurement as its deviation from the age-quantile median.
for col in ('Monocytes', 'Leukocytes'):
    df[col + '_gr'] = df[col] - df.groupby('age_quantile')[col].transform(np.median)
COVID-19 diagnostic
8,860,770
# Show every training row reported for the United States.
us_mask = train['Country_Region'] == 'United States'
print(train.loc[us_mask])
# Replace every missing value with the sentinel -999, then display the frame.
df.fillna(value=-999, inplace=True)
df
COVID-19 diagnostic
8,860,770
# Unique Country_Region values present in the training set.
# FIX (idiom): replaces a manual range(len(...)) index loop + append with a
# direct set construction over the column -- same resulting set, O(n), no
# intermediate list.
ty = set(train['Country_Region'])
# Remove the target so only features remain.
df = df.drop(columns='covid_19')
COVID-19 diagnostic
8,860,770
# Country names seen in training data but absent from the continent lookup.
print(ty.difference(tu))
# The sample identifier carries no predictive signal; drop it.
df = df.drop(columns='id')
COVID-19 diagnostic
8,860,770
# Add an empty 'Continent' column at a fixed position in each frame
# (position 11 in train, 9 in test).
for frame, position in ((train, 11), (test, 9)):
    frame.insert(position, 'Continent', '')
# Numeric feature matrices: the first 4000 rows are train, the rest test.
numeric = df.select_dtypes(exclude=['object'])
train_X = numeric.iloc[:4000].values
test_X = numeric.iloc[4000:].values
COVID-19 diagnostic
8,860,770
# Fill the Continent column from the country -> continent lookup.
data = [train, test]
for frame in data:
    frame['Continent'] = frame['Country_Region'].map(conti)
COVID-19 diagnostic
8,860,770
# A missing Province_State means the record covers the whole country:
# reuse the country name as the state value.
data = [train, test]
for frame in data:
    frame['Province_State'] = frame['Province_State'].fillna(frame['Country_Region'])
COVID-19 diagnostic
8,860,770
# Per-column count of missing values in the training frame.
print(train.isna().sum())
COVID-19 diagnostic
8,860,770
# Re-check missing values after the Province_State fill.
print(train.isna().sum(axis=0))
# Hand-tuned shallow decision tree; predict_proba yields class probabilities
# for the test rows.
tree_clf = DecisionTreeClassifier(
    max_depth=10,           # cap depth to limit overfitting
    min_samples_leaf=28,    # require reasonably large leaves
    max_features=0.94,      # fraction of features considered per split
    criterion="gini",
    random_state=1)
dt = tree_clf.fit(train_X, y)
y_pred = dt.predict_proba(test_X)
COVID-19 diagnostic
8,860,770
<install_modules>
# Pair each column with the tree's importance score and keep only the
# columns the tree actually used (non-zero importance).
imp = pd.DataFrame({
    'colnames': df.columns,
    'importances': tree_clf.feature_importances_,
})
good_columns = imp.loc[imp['importances'] != 0, 'colnames'].values
COVID-19 diagnostic
8,860,770
!pip install pycountry-convert <prepare_x_and_y>
# Rebuild the train/test matrices restricted to the useful columns.
train_X = df.loc[:, good_columns].iloc[:4000].values
test_X = df.loc[:, good_columns].iloc[4000:].values
COVID-19 diagnostic
8,860,770
# Feature matrices selected by *column position*. The two index lists
# presumably pick the same logical fields at the different offsets they
# occupy in train vs test (state, country, continent, etc.) -- TODO confirm
# against the actual column layout; positional selection is fragile.
X_train = train.iloc[:, [1, 2, 11, 6, 7, 8, 9, 10]].values
X_test = test.iloc[:, [1, 2, 9, 4, 5, 6, 7, 8]].values
# Targets: column 4 and column 5 of train -- presumably ConfirmedCases and
# Fatalities (they feed those submission fields later); verify.
Y_train = train.iloc[:, [4]].values
Y_train1 = train.iloc[:, [5]].values
COVID-19 diagnostic
8,860,770
# Label-encode the three categorical columns, then one-hot them.
# FIX: the original fit a fresh LabelEncoder on the *test* columns, so the
# same category could receive different integer codes in train vs test, and
# it re-fit the ColumnTransformer on the test matrix, which can produce
# one-hot columns that do not line up with the training columns. Encoders
# are now fit on the union of both sets and the transformer is fit once on
# train and only *applied* to test.
for col in range(3):
    lbl = LabelEncoder()
    lbl.fit(list(X_train[:, col]) + list(X_test[:, col]))
    X_train[:, col] = lbl.transform(X_train[:, col])
    X_test[:, col] = lbl.transform(X_test[:, col])

ct = ColumnTransformer(
    [('one_hot_encoder', OneHotEncoder(categories='auto'), [0, 1, 2])],
    remainder='passthrough'
)
X_train = ct.fit_transform(X_train).toarray()
X_test = ct.transform(X_test).toarray()
import lightgbm as lgb
COVID-19 diagnostic
8,860,770
# Fit one regressor per target.
# FIX: the original created `regressor1` for the second target but then
# called `regressor.fit(...)` again, leaving `regressor1` unfitted and
# overwriting the first model (y_pred1 was predicted from the re-fit
# `regressor`). Each target now gets its own fitted model.
regressor = DecisionTreeRegressor(random_state=1)
regressor.fit(X_train, Y_train)
y_pred = regressor.predict(X_test)       # ConfirmedCases predictions

regressor1 = DecisionTreeRegressor(random_state=1)
regressor1.fit(X_train, Y_train1)
y_pred1 = regressor1.predict(X_test)     # Fatalities predictions
# Gradient-boosted tree classifier (binary objective); small learning rate
# with few leaves -- hand-tuned, not searched.
bt = lgb.LGBMClassifier(colsample_bytree=0.85,
                        min_child_samples=20,
                        min_split_gain=0,
                        n_estimators=250,
                        num_leaves=10,          # shallow trees
                        reg_alpha=0.0,
                        reg_lambda=0.1,
                        subsample=1,
                        n_jobs=-1,
                        objective='binary',
                        boosting_type='gbdt',
                        learning_rate=0.01,
                        random_state=1)
COVID-19 diagnostic
8,860,770
# Truncate the regression outputs to integers (counts) and show both arrays.
y_pred = y_pred.astype(int)
y_pred1 = y_pred1.astype(int)
print(y_pred)
print(y_pred1)
# Train the boosted model on the reduced feature matrix.
bt.fit(train_X, y)
COVID-19 diagnostic
8,860,770
# Report every index where both targets are non-trivial
# (more than one fatality and at least one confirmed case).
for idx, (cases, deaths) in enumerate(zip(y_pred, y_pred1)):
    if deaths > 1 and cases > 0:
        print(idx, " ", cases, " ", deaths)
# Positive-class probability per test row (bare expression: displayed by
# the notebook, not stored).
bt.predict_proba(test_X)[:,1]
COVID-19 diagnostic
8,860,770
<save_to_csv>
# Fill the submission with positive-class probabilities and display it.
submission['covid_19'] = bt.predict_proba(test_X)[:,1]
submission
COVID-19 diagnostic
8,860,770
# Write the forecast submission row by row.
# FIX: open with newline='' as the csv module requires -- without it the
# writer emits blank rows on Windows. Also replaced the index loop with zip
# over the three aligned sequences.
fields = ['ForecastId', 'ConfirmedCases', 'Fatalities']
with open("submission.csv", 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow(fields)
    for forecast_id, cases, deaths in zip(test['ForecastId'], y_pred, y_pred1):
        csvwriter.writerow([forecast_id, cases, deaths])
# Persist the submission without the index column.
submission.to_csv('submission.csv', index=False)
COVID-19 diagnostic
8,860,770
# Display the forecast chart image.
# NOTE(review): the path contains '.. /input' (space after '..') -- looks
# like extraction damage; confirm the real path is '../input/...'.
Image(".. /input/covid19-forecast-week-2/COVID-19 forecast - week 2.png")
# Re-write the submission file (duplicate of the earlier save).
submission.to_csv('submission.csv', index=False)
COVID-19 diagnostic
9,283,112
# LSTM-based sequence forecaster: fits one model for log(ConfirmedCases+1)
# and one for log(Fatalities+1), then rolls each forward 4 x forecast_length
# days autoregressively to fill the submission.
# NOTE(review): paths contain '.. /input' (space) -- likely extraction
# damage; confirm '../input/...'.
data = pd.read_csv('.. /input/covid19-global-forecasting-week-2/train.csv')
# Work in log1p space so the loss treats small and large regions comparably.
data['ConfirmedCases'] = data['ConfirmedCases'].apply(lambda x: np.log(x + 1))
data['Fatalities'] = data['Fatalities'].apply(lambda x: np.log(x + 1))
# One (country, province) tuple per row; used as the area identifier.
data["Country & Subdivision"] = [(data['Country_Region'][i],
                                  data['Province_State'][i]) for i in range(len(data))]
unique_areas = data['Country & Subdivision'].unique()
unique_areas_dict = {j : i for i, j in enumerate(unique_areas)}
NUM_DATES = 100; TRAIN_SIZE = 57;
# Keep the first TRAIN_SIZE+1 dates of each area's NUM_DATES-long series
# (assumes Ids are consecutive within each area -- TODO confirm).
train = data[data['Id']%NUM_DATES <= TRAIN_SIZE]
encoded_train = np.array(list(map(lambda x: unique_areas_dict[x],
                                  train['Country & Subdivision'].to_numpy()))).reshape((-1, 1))

class MyModel(tf.keras.Model):
    """Embedding(area) + current value -> stacked LSTMs -> forecast_length outputs."""

    def __init__(self,):
        super(MyModel, self).__init__()
        self.embedding_size = 10
        self.LSTM_size = 30
        self.w1 = tf.keras.layers.Embedding(len(unique_areas), self.embedding_size,)
        self.w2 = tf.keras.layers.LSTM(self.LSTM_size, return_sequences=True, stateful=False,)
        # relu keeps the log-space forecasts non-negative.
        self.w3 = tf.keras.layers.LSTM(forecast_length, return_sequences=True,
                                       stateful=False, activation='relu')

    def call(self, inputs):
        # inputs: [..., location_id, current_value] along the last axis.
        location, curr_num = tf.split(inputs, [-1, 1], 2)
        x = self.w1(location)
        x = tf.reshape(x, [-1, inputs.shape[1], self.embedding_size])
        x = tf.concat([x, curr_num], axis=2)
        x = self.w2(x,)
        x = self.w3(x,)
        return x

forecast_length = 13
# Shape the training data to (areas, timesteps, [area_id, value]).
X = np.concatenate((encoded_train, train['ConfirmedCases'].to_numpy().reshape((-1, 1))), axis=1)
X = X.reshape((len(unique_areas), -1, 2))
total_length = X.shape[1]
Y = X[:, :, -1]
# Sliding windows: target at step t is the next forecast_length values.
Y = [[y[i:i+forecast_length] for i in range(1, total_length-forecast_length+1)] for y in Y]
Y = np.array(Y)
X = X[:, :-forecast_length, :]
model_1 = MyModel()
model_1.compile(optimizer='adam', loss='mae',)
# Re-run training until the final loss drops below 0.1 (no upper bound on
# restarts -- this can loop indefinitely if the target is never reached).
while 'history_1' not in vars() or history_1.history['loss'][-1] > 0.1:
    history_1 = model_1.fit(X, Y, epochs=10000, batch_size=len(unique_areas),)
model_1.save_weights('model_1_weights.h5')
encoded_test = np.array(list(map(lambda x: unique_areas_dict[x],
                                 data['Country & Subdivision'].to_numpy()))).reshape((-1, 1))
test_X = np.concatenate((encoded_test, data['ConfirmedCases'].to_numpy().reshape((-1, 1))), axis=1)
test_X = test_X.reshape((len(unique_areas), -1, 2))
test_X = test_X[:, :TRAIN_SIZE, :]
subm = pd.read_csv('.. /input/covid19-global-forecasting-week-2/submission.csv')
results = np.zeros((X.shape[0], 0))
# Roll the model forward 4 windows, feeding each forecast back as input.
for i in range(4):
    model_1.reset_states()
    test_Y = model_1(test_X).numpy()[:, -1, :]
    results = np.concatenate((results, np.exp(test_Y) - 1), axis=1)
    test_Y = test_Y.reshape((-1, forecast_length, 1))
    # NOTE(review): the comprehension's `i` shadows the loop variable `i`,
    # so the area ids come from range(len(test_Y)), not the window index --
    # confirm this is intended.
    test_Y = np.concatenate(([[[i]] * forecast_length for i in range(len(test_Y))],
                             test_Y,), axis=2)
    test_X = np.concatenate((test_X, test_Y), axis=1)
subm['ConfirmedCases'] = results[:, :NUM_DATES-TRAIN_SIZE].reshape(-1)
# --- Same pipeline for Fatalities (MSE loss, tighter 0.01 threshold). ---
X = np.concatenate((encoded_train, train['Fatalities'].to_numpy().reshape((-1, 1))), axis=1)
X = X.reshape((len(unique_areas), -1, 2))
total_length = X.shape[1]
Y = X[:, :, -1]
Y = [[y[i:i+forecast_length] for i in range(1, total_length-forecast_length+1)] for y in Y]
Y = np.array(Y)
X = X[:, :-forecast_length, :]
model_2 = MyModel()
model_2.compile(optimizer='adam', loss='mse',)
while 'history_2' not in vars() or history_2.history['loss'][-1] > 0.01:
    history_2 = model_2.fit(X, Y, epochs=10000, batch_size=len(unique_areas),)
model_2.save_weights('model_2_weights.h5')
encoded_test = np.array(list(map(lambda x: unique_areas_dict[x],
                                 data['Country & Subdivision'].to_numpy()))).reshape((-1, 1))
test_X = np.concatenate((encoded_test, data['Fatalities'].to_numpy().reshape((-1, 1))), axis=1)
test_X = test_X.reshape((len(unique_areas), -1, 2))
test_X = test_X[:, :TRAIN_SIZE, :]
results = np.zeros((X.shape[0], 0))
for i in range(4):
    model_2.reset_states()
    test_Y = model_2(test_X).numpy()[:, -1, :]
    print(np.exp(test_Y[2]) - 1)   # debug: forecast for area index 2
    results = np.concatenate((results, np.exp(test_Y) - 1), axis=1)
    test_Y = test_Y.reshape((-1, forecast_length, 1))
    test_Y = np.concatenate(([[[i]] * forecast_length for i in range(len(test_Y))],
                             test_Y,), axis=2)
    test_X = np.concatenate((test_X, test_Y), axis=1)
subm['Fatalities'] = results[:, :NUM_DATES-TRAIN_SIZE].reshape(-1)
subm.to_csv('submission.csv', index=False)
# Show up to 3000 columns when displaying frames (these datasets are wide).
pd.set_option('display.max.columns', 3000)
KNIT_HACKS
9,283,112
# Silence library warnings for cleaner notebook output.
warnings.filterwarnings('ignore')
# Load the KNIT_HACKS competition data.
train = pd.read_csv('/kaggle/input/knit-hacks/train.csv')
test = pd.read_csv('/kaggle/input/knit-hacks/test.csv')
KNIT_HACKS
9,283,112
# Load the week-2 forecasting data plus a per-country PPP table used as a
# wealth proxy by the fatality-rate heuristics further down.
# NOTE(review): paths contain '.. /input' (space) -- looks like extraction
# damage; confirm '../input/...'.
dftrain = pd.read_csv('.. /input/covid19-global-forecasting-week-2/train.csv',
                      parse_dates=['Date']).sort_values(by=['Country_Region', 'Date'])
dftest = pd.read_csv('.. /input/covid19-global-forecasting-week-2/test.csv',
                     parse_dates=['Date']).sort_values(by=['Country_Region', 'Date'])
# FIX: raw string for the regex separator ('\s' is an invalid escape in a
# plain string literal and warns on modern Python).
ppp_tabel = pd.read_csv('.. /input/country-ppp/Country_PPP.csv', sep=r'\s+')
# FIX: pass axis by keyword -- DataFrame.drop(label, 1) relies on the
# positional `axis` argument, deprecated and removed in modern pandas.
ppp_tabel.drop('Id', axis=1, inplace=True)
# Normalise country names to match the forecasting data.
ppp_tabel["Country"].replace('_', ' ', regex=True, inplace=True)
ppp_tabel["Country"].replace('United States', 'US', regex=True, inplace=True)
ppp_tabel.rename(columns={'Country': 'Country_Region'}, inplace=True)
ppp_tabel.sort_values('Country_Region', inplace=True)
# Share of each Col2 value relative to the total row count.
train['Col2'].value_counts().div(len(train))
KNIT_HACKS
9,283,112
# Day-of-year features; 'Expo' counts days past dayofyear 89.5 -- presumably
# the anchor for the exponential-growth extrapolation below (TODO confirm).
dftrain['Dayofyear'] = dftrain['Date'].dt.dayofyear
dftest['Dayofyear'] = dftest['Date'].dt.dayofyear
dftest['Expo'] = dftest['Dayofyear']-89.5
# Attach known actuals to test rows where train/test dates overlap;
# indicator=True adds a '_merge' column flagging matched rows.
dftest = dftest.merge(dftrain[['Country_Region','Province_State','Date','ConfirmedCases','Fatalities']],
                      on=['Country_Region','Province_State','Date'], how='left', indicator=True)
print("dftest columns =",dftest.columns)
dftest.head(60)
# True if any column of the training frame contains a missing value.
train.isna().any().any()
KNIT_HACKS
9,283,112
# Per-country growth factor and fatality-percentage heuristics built from the
# last reported week of training data.
# NOTE(review): `grouped2`, 'NewCases', 'NewFatalities', 'FatalityBasis' and
# 'FatalityPct2' are not created in this cell -- presumably defined in an
# earlier (not shown) cell; verify before editing.
# Days 5-8 from the end of each area's series.
grouped = dftrain.groupby(['Country_Region', 'Province_State']).tail(8)
grouped = grouped.groupby(['Country_Region', 'Province_State']).head(4)
grouped.drop(['FatalityBasis'], axis=1, inplace=True)
to_sum = ['NewCases', 'NewFatalities']
grouped1 = grouped.groupby(['Country_Region'])[to_sum].sum()
grouped1.rename(columns={'NewCases': 'NewCases1', 'NewFatalities': 'NewFatalities1'}, inplace=True)
print("grouped1 columns =", grouped1.columns)
print("grouped2 columns =", grouped2.columns)
grouped = pd.merge(grouped1, grouped2, on=['Country_Region'])
# Week-over-week case growth in percent, clamped to [0, 140] and NaN -> 0.
grouped['CasesIncreasePct'] = 100*(grouped['NewCases2']/grouped['NewCases1']-1)
mask = grouped['CasesIncreasePct'] > 140
grouped.loc[mask, 'CasesIncreasePct'] = 140
mask = grouped['CasesIncreasePct'] < 0
grouped.loc[mask, 'CasesIncreasePct'] = 0
mask = grouped['CasesIncreasePct'].isnull()
grouped.loc[mask, 'CasesIncreasePct'] = 0
# Daily multiplier: fourth root converts the 4-day growth to per-day.
grouped['Factor'] = (grouped['CasesIncreasePct']/100+1)**0.25
grouped = pd.merge(grouped, ppp_tabel, on=['Country_Region'])
grouped['ppp'] = grouped['ppp']/10000.
# Clamp FatalityPct2 into a PPP-dependent band: poorer countries (ppp <= 1)
# are pinned near 5%, richer (ppp >= 7) to [1.5, 6]%, with linear
# interpolation of the bounds in between; missing values get band defaults.
mask = (grouped['FatalityPct2'] > 9) & (grouped['ppp'] <= 1)
grouped.loc[mask, 'FatalityPct2'] = 5
mask = (grouped['FatalityPct2'] < 5) & (grouped['ppp'] <= 1)
grouped.loc[mask, 'FatalityPct2'] = 5
mask = (grouped['FatalityPct2'] > 6) & (grouped['ppp'] >= 7)
grouped.loc[mask, 'FatalityPct2'] = 6
mask = (grouped['FatalityPct2'] < 1.5) & (grouped['ppp'] >= 7)
grouped.loc[mask, 'FatalityPct2'] = 1.5
mask = (grouped['FatalityPct2'] > (9.5 - 0.43*grouped['ppp'])) & (grouped['ppp'] > 1) & (grouped['ppp'] < 7)
grouped.loc[mask, 'FatalityPct2'] = (9.5 - 0.43*grouped['ppp'])
mask = (grouped['FatalityPct2'] < (5.6 - 0.5*grouped['ppp'])) & (grouped['ppp'] > 1) & (grouped['ppp'] < 7)
grouped.loc[mask, 'FatalityPct2'] = (5.6 - 0.5*grouped['ppp'])
mask = (grouped['FatalityPct2'].isnull()) & (grouped['ppp'] <= 1)
grouped.loc[mask, 'FatalityPct2'] = 7
mask = (grouped['FatalityPct2'].isnull()) & (grouped['ppp'] >= 7)
grouped.loc[mask, 'FatalityPct2'] = 4
mask = (grouped['FatalityPct2'].isnull()) & (grouped['ppp'] > 1) & (grouped['ppp'] < 7)
grouped.loc[mask, 'FatalityPct2'] = (7.5 - 0.5*grouped['ppp'])
grouped.tail(6)
print("grouped columns =", grouped.columns)
# Names of the object-dtype (text/categorical) columns.
cat_col = train.select_dtypes(include=['object']).columns.tolist()
KNIT_HACKS
9,283,112
# Attach the per-country heuristics (FatalityPct2, Factor) and the
# per-province baselines to the test frame.
# NOTE(review): `grouped_gem` is not defined in this view -- presumably an
# earlier cell; verify the baseline columns it provides.
dftest.drop('_merge', axis=1, inplace=True)
dftest = dftest.merge(grouped[['Country_Region','FatalityPct2','Factor']],
                      on=['Country_Region'], how='left')
dftest = dftest.merge(grouped_gem[['Province_State','Country_Region','ConfirmedCases_base','ConfirmedCases_init','NewCases_base','Fatalities_init','FatalityBasis']],
                      on=['Province_State','Country_Region'], how='left')
# Drop the categorical columns from the training frame.
train = train.drop(columns=cat_col)
KNIT_HACKS
9,283,112
# Fill the unknown test rows by exponential extrapolation: new cases grow by
# Factor**Expo from the per-province baseline; fatalities are a percentage
# of the case count 10 days earlier. Order of the statements matters --
# `mask` marks rows with no known actuals and is reused throughout.
dftest['ConfirmedCases_shift'] = dftest.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases']].transform(lambda x: x.shift(1))
mask = dftest['ConfirmedCases'].isnull()
dftest.loc[mask,'NewCases'] = dftest.loc[mask,'NewCases_base']*(dftest.loc[mask,'Factor']**dftest.loc[mask,'Expo'])
dftest['NewCases_cum'] = dftest.groupby(['Country_Region', 'Province_State'])[['NewCases']].cumsum()
dftest.loc[mask,'ConfirmedCases'] = dftest.loc[mask,'ConfirmedCases_init'] + dftest.loc[mask,'NewCases_cum']
# Damp the fatality percentage for very large outbreaks (cumulative 0.7x
# per threshold crossed: >400k once, >800k twice).
mask3 = dftest['ConfirmedCases'] > 400000
dftest.loc[mask3,'FatalityPct2'] = dftest.loc[mask3,'FatalityPct2']*0.7
mask4 = dftest['ConfirmedCases'] > 800000
dftest.loc[mask4,'FatalityPct2'] = dftest.loc[mask4,'FatalityPct2']*0.7
# Fatality basis = confirmed cases 10 days earlier (infection-to-death lag).
dftest['FatalityBasis'] = dftest.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases']].transform(lambda x: x.shift(10))
dftest.loc[mask,'NewFatalities'] = dftest.loc[mask,'FatalityBasis'] * dftest.loc[mask,'FatalityPct2']/100
# Cap daily fatalities at 1000.
mask2 = dftest['NewFatalities'] > 1000
dftest.loc[mask2,'NewFatalities'] = 1000
print("MASK2", mask2.sum())
dftest['NewFatalities_cum'] = dftest.groupby(['Country_Region', 'Province_State'])[['NewFatalities']].cumsum()
dftest.loc[mask,'Fatalities'] = dftest.loc[mask,'Fatalities_init'] + dftest.loc[mask,'NewFatalities_cum']
print("dftest columns =", dftest.columns)
dftest.head(5)
# Work on an independent copy of the test frame (copy() is deep by default).
test_new = test.copy()
KNIT_HACKS
9,283,112
# Strip all intermediate working columns, keep the last row per area in
# `final`, and restore the original Province_State column.
# NOTE(review): 'Province_State_orig' is not created in this view --
# presumably an earlier cell renamed it; verify.
dftest.drop(['Dayofyear', 'Expo','FatalityPct2', 'Factor', 'ConfirmedCases_base',
             'ConfirmedCases_init', 'NewCases_base', 'Fatalities_init', 'FatalityBasis',
             'ConfirmedCases_shift', 'NewCases', 'NewCases_cum', 'NewFatalities',
             'NewFatalities_cum'], axis=1, inplace=True)
final = dftest.groupby(['Country_Region','Province_State']).tail(1)
dftest.drop(['Province_State'], axis=1, inplace=True)
dftest.rename(columns={'Province_State_orig':'Province_State'}, inplace=True)
# Drop the categorical columns from the test copy as well.
test_new = test_new.drop(columns=cat_col)
KNIT_HACKS
9,283,112
# Remove the key columns so only ForecastId and the targets remain.
dftest.drop(columns=['Province_State', 'Country_Region', 'Date'], inplace=True)
print("dftest columns =", dftest.columns)
# Replace remaining missing values with zero.
train = train.fillna(value=0)
KNIT_HACKS
9,283,112
# Cast ids and round the targets to integer counts, then write the file.
dftest['ForecastId'] = dftest['ForecastId'].astype(int)
dftest['ConfirmedCases'] = dftest['ConfirmedCases'].round().astype(int)
dftest['Fatalities'] = dftest['Fatalities'].round().astype(int)
dftest.to_csv('submission.csv', index=False)
# Replace remaining missing values with zero in the test copy.
test_new = test_new.fillna(value=0)
KNIT_HACKS
9,283,112
%matplotlib inline <load_from_csv>
! pip install -U sliced
KNIT_HACKS
9,283,112
# Load the week-2 global forecasting data and preview the training frame.
train = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/train.csv")
test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/test.csv")
train.head()
# Work on an independent copy of the training frame (copy() is deep by default).
train_copy = train.copy()
KNIT_HACKS
9,283,112
EMPTY_VAL = "EMPTY_VAL" def fillState(state, country): if state == EMPTY_VAL: return country return state train_['Province_State'].fillna(EMPTY_VAL, inplace=True) train_['Province_State'] = train_.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fillState(x['Province_State'], x['Country_Region']), axis=1) test['Province_State'].fillna(EMPTY_VAL, inplace=True) test['Province_State'] = test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fillState(x['Province_State'], x['Country_Region']), axis=1) test.head()<prepare_x_and_y>
# Remove the Col2 target from the feature copy.
train_copy = train_copy.drop(columns='Col2')
KNIT_HACKS
9,283,112
# Fit logistic growth curves to Hubei's cases and fatalities and plot the
# fit against the actuals. `logistic` is a numerically stable sigmoid
# (branches on the sign of the exponent to avoid overflow).
train_['row_number'] = train_.groupby(['Country_Region', 'Province_State']).cumcount()
# Chained boolean indexing -- works but raises a pandas UserWarning.
x = train_[train_["Country_Region"] == 'China'][train_["Province_State"] == 'Hubei']['row_number']
y = train_[train_["Country_Region"] == 'China'][train_["Province_State"] == 'Hubei']['ConfirmedCases']
y_ = train_[train_["Country_Region"] == 'China'][train_["Province_State"] == 'Hubei']['Fatalities']

def f(x, L, b, k, x_0):
    """Four-parameter logistic (adds a vertical offset b)."""
    return L / (1. + np.exp(-k * (x - x_0))) + b

def logistic(xs, L, k, x_0):
    """Overflow-safe three-parameter logistic evaluated element-wise."""
    result = []
    for x in xs:
        xp = k*(x-x_0)
        if xp >= 0:
            result.append(L / (1. + np.exp(-xp)))
        else:
            result.append(L * np.exp(xp) / (1. + np.exp(xp)))
    return result

# Initial guesses: plateau at the observed max, zero rate, centre at the end.
p0 = [max(y), 0.0, max(x)]
p0_ = [max(y_), 0.0, max(x)]
x_ = np.arange(0, 100, 1).tolist()
try:
    popt, pcov = opt.curve_fit(logistic, x, y, p0)
    yfit = logistic(x_, *popt)
    popt_, pcov_ = opt.curve_fit(logistic, x, y_, p0_)
    yfit_ = logistic(x_, *popt_)
# Bare except: any fit failure falls back to the offset logistic via LM.
# NOTE(review): f() applies arithmetic to the plain list x_ -- this branch
# may raise on evaluation; confirm it is ever exercised.
except:
    popt, pcov = opt.curve_fit(f, x, y, method="lm", maxfev=5000)
    yfit = f(x_, *popt)
    popt_, pcov_ = opt.curve_fit(f, x, y_, method="lm", maxfev=5000)
    yfit_ = f(x_, *popt_)
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.plot(x, y, 'o', label='Actual Cases')
ax.plot(x_, yfit, '-', label='Fitted Cases')
ax.plot(x, y_, 'o', label='Actual Fatalities')
ax.plot(x_, yfit_, '-', label='Fitted fatalities')
ax.title.set_text('China - Hubei province')
plt.legend(loc="center right")
plt.show()
# Drop zero-variance (constant) columns from the feature matrix.
vt = VarianceThreshold(threshold=0)
vt_x = vt.fit_transform(train_copy)
KNIT_HACKS
9,283,112
# One row per (country, province) pair; count() just fills the remaining
# columns -- only the key pairs are used downstream.
unique = pd.DataFrame(train_.groupby(['Country_Region', 'Province_State'], as_index=False).count())
unique.head()
# Apply the fitted variance filter to the test features.
vt_test = vt.transform(test_new)
KNIT_HACKS
9,283,112
def date_day_diff(d1, d2):
    """Whole days from ISO date string d2 to d1 (both '%Y-%m-%d')."""
    delta = dt.datetime.strptime(d1, "%Y-%m-%d") - dt.datetime.strptime(d2, "%Y-%m-%d")
    return delta.days

# Fit logistic curves per region: prefer the 3-parameter `logistic`, fall
# back to the 4-parameter `f` (trust-region solver) when the fit fails.
log_regions = []
for index, region in unique.iterrows():
    st = region['Province_State']
    co = region['Country_Region']
    rdata = train_[(train_['Province_State']==st) & (train_['Country_Region']==co)]
    # Days elapsed since the region's first record.
    t = rdata['Date'].values
    t = [float(date_day_diff(d, t[0])) for d in t]
    y = rdata['ConfirmedCases'].values
    y_ = rdata['Fatalities'].values
    p0 = [max(y), 0.0, max(t)]
    p0_ = [max(y_), 0.0, max(t)]
    try:
        popt, pcov = opt.curve_fit(logistic, t, y, p0, maxfev=10000)
        try:
            popt_, pcov_ = opt.curve_fit(logistic, t, y_, p0_, maxfev=10000)
        # Bare except: any failure on the fatalities fit falls back to f.
        except:
            popt_, pcov_ = opt.curve_fit(f, t, y_, method="trf", maxfev=10000)
        log_regions.append((co, st, popt, popt_))
    # Bare except: if the cases fit fails, refit both targets with f.
    except:
        popt, pcov = opt.curve_fit(f, t, y, method="trf", maxfev=10000)
        popt_, pcov_ = opt.curve_fit(f, t, y_, method="trf", maxfev=10000)
        log_regions.append((co, st, popt, popt_))
print("All done!")
# Sliced inverse regression reducing the features to 2 directions.
sir = SlicedInverseRegression(n_directions=2)
KNIT_HACKS
9,283,112
# Convert the fitted (country, state, cases-params, fatality-params) tuples
# into a DataFrame for inspection.
log_regions = pd.DataFrame(log_regions)
log_regions.head()
# Fit SIR on the variance-filtered features against the Col2 target.
sir.fit(vt_x, train['Col2'])
KNIT_HACKS
9,283,112
# Give the fitted-parameter frame meaningful column names.
log_regions.columns = ['Country_Region','Province_State','ConfirmedCases','Fatalities']
log_regions.head(1)
# Project the training features onto the fitted SIR directions.
X_sir = sir.transform(vt_x)
KNIT_HACKS
9,283,112
# Quantiles of the second fitted parameter across regions -- presumably the
# logistic growth-rate k (parameter order [L, k, x_0]); confirm.
log_regions['ConfirmedCases'].str[1].quantile([.1,.25,.5,.75,.95,.99])
# Project the test features onto the fitted SIR directions.
X_test = sir.transform(vt_test)
KNIT_HACKS
9,283,112
# Print the fitted curve parameters for every region.
# FIX: the original read `list(['ConfirmedCases'])`, which yields the
# literal one-element list ['ConfirmedCases'] instead of the row's fitted
# ConfirmedCases parameters (compare the corrected loop later in the file).
for index, rt in log_regions.iterrows():
    st = rt['Province_State']
    co = rt['Country_Region']
    popt = list(rt['ConfirmedCases'])
    popt_ = list(rt['Fatalities'])
    print(co, st, popt, popt_)
from sklearn.linear_model import LogisticRegression
KNIT_HACKS
9,283,112
# Where the fatality fit returned the untouched initial guess [0, 0, 69],
# derive fatality parameters by scaling the case parameters.
# NOTE(review): `fp` is not defined in this view -- presumably a scaling
# vector from an earlier cell; verify before relying on this.
for index, rt in log_regions.iterrows():
    st = rt['Province_State']
    co = rt['Country_Region']
    popt = list(rt['ConfirmedCases'])
    popt_ = list(rt['Fatalities'])
    if popt_ == [0.0, 0.0, 69.0]:
        popt_ = np.multiply(fp, popt)
    print(co, st, popt, popt_)
# Baseline logistic regression on the SIR-reduced features.
lr = LogisticRegression().fit(X_sir, train['Col2'])
KNIT_HACKS