kernel_id int64 24.2k–23.3M | prompt stringlengths 8–1.85M | completion stringlengths 1–182k | comp_name stringlengths 5–57 |
---|---|---|---|
90,625 | %matplotlib inline
%config InlineBackend.figure_format = 'retina'
<load_from_csv> | m = phone.phone_brand.str.cat(phone.device_model)
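# Concatenating brand and device model gives one categorical label per (brand, model) pair.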
modelencoder = LabelEncoder().fit(m)
phone['model'] = modelencoder.transform(m)
gatrain['model'] = phone['model']
gatest['model'] = phone['model']
Xtr_model = csr_matrix(( np.ones(gatrain.shape[0]),
(gatrain.trainrow, gatrain.model)))
Xte_model = csr_matrix(( np.ones(gatest.shape[0]),
(gatest.testrow, gatest.model)))
print('Model features: train shape {}, test shape {}'.format(Xtr_model.shape, Xte_model.shape)) | TalkingData Mobile User Demographics |
90,625 | def giba_model() :
def exponential(x, a, k, b):
return a*np.exp(x*k)+ b
def rmse(yt, yp):
return np.sqrt(np.mean(( yt-yp)**2))
train = pd.read_csv('../input/covid19-global-forecasting-week-2/train.csv')
train['Date'] = pd.to_datetime(train['Date'])
train['Province_State'] = train['Province_State'].fillna('')
test = pd.read_csv('../input/covid19-global-forecasting-week-2/test.csv')
test['Date'] = pd.to_datetime(test['Date'])
test['Province_State'] = test['Province_State'].fillna('')
test['Id'] = -1
test['ConfirmedCases'] = 0
test['Fatalities'] = 0
publictest = test.loc[test.Date > train.Date.max()].copy()
train = pd.concat((train, publictest))
train['ForecastId'] = pd.merge(train, test, on=['Country_Region','Province_State','Date'], how='left')['ForecastId_y'].values
train.sort_values(['Country_Region','Province_State','Date'], inplace=True)
train = train.reset_index(drop=True)
train['cid'] = train['Country_Region'] + '_' + train['Province_State']
train['log0'] = np.log1p(train['ConfirmedCases'])
train['log1'] = np.log1p(train['Fatalities'])
train['log0'] = train.groupby('cid')['log0'].cummax()
train['log1'] = train.groupby('cid')['log1'].cummax()
train = train.loc[(train.log0 > 0)|(train.ForecastId.notnull())].copy()
train = train.reset_index(drop=True)
train['day'] = train.groupby('cid')['Id'].cumcount()
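# create_features builds per-location (cid) lag and rolling-mean features, then splits train/validation by date.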
def create_features(df, traindate, lag=1):
df['lag0_1'] = df.groupby('cid')['target0'].shift(lag)
df['lag1_1'] = df.groupby('cid')['target1'].shift(lag)
df['lag0_1'] = df.groupby('cid')['lag0_1'].fillna(method='bfill')
df['lag1_1'] = df.groupby('cid')['lag1_1'].fillna(method='bfill')
windows = [2, 3, 4, 5, 7, 10, 12, 16, 20]
for j, w in enumerate(windows):
    df['m{}'.format(j)] = df.groupby('cid')['lag0_1'].rolling(w).mean().values
    df['n{}'.format(j)] = df.groupby('cid')['lag1_1'].rolling(w).mean().values
for j in range(len(windows)):
    df['m{}'.format(j)] = df.groupby('cid')['m{}'.format(j)].fillna(method='bfill')
    df['n{}'.format(j)] = df.groupby('cid')['n{}'.format(j)].fillna(method='bfill')
df['flag_China'] = 1*(df['Country_Region'] == 'China')
df['flag_Italy'] = 1*(df['Country_Region'] == 'Italy')
df['flag_Spain'] = 1*(df['Country_Region'] == 'Spain')
df['flag_US'] = 1*(df['Country_Region'] == 'US')
df['flag_Brazil']= 1*(df['Country_Region'] == 'Brazil')
tr = df.loc[ df.Date < traindate ].copy()
vl = df.loc[ df.Date == traindate ].copy()
tr = tr.loc[ tr.lag0_1 > 0 ]
return tr, vl
def train_period(
train,
valid_days = ['2020-03-13'],
lag = 1,
seed = 1,
):
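# Targets are log1p-transformed counts, so plain RMSE on them matches the competition's RMSLE.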
train['target0'] = np.log1p(train['ConfirmedCases'])
train['target1'] = np.log1p(train['Fatalities'])
param = {
'subsample': 0.80,
'colsample_bytree': 0.85,
'max_depth': 7,
'gamma': 0.000,
'learning_rate': 0.01,
'min_child_weight': 5.00,
'reg_alpha': 0.000,
'reg_lambda': 0.400,
'silent':1,
'objective':'reg:squarederror',
'nthread': -1,
'seed': seed,
}
tr, vl = create_features(train.copy() , valid_days[0] , lag=lag)
features = [f for f in tr.columns if f not in [
'Id',
'ConfirmedCases',
'Fatalities',
'log0',
'log1',
'target0',
'target1',
'Province_State',
'Country_Region',
'Date',
'ForecastId',
'cid',
] ]
dtrain = xgb.DMatrix(tr[features], tr['target0'])
dvalid = xgb.DMatrix(vl[features], vl['target0'])
watchlist = [(dvalid, 'eval')]
model0 = xgb.train(param, dtrain, 767, watchlist , verbose_eval=0)
dtrain = xgb.DMatrix(tr[features], tr['target1'])
dvalid = xgb.DMatrix(vl[features], vl['target1'])
watchlist = [(dvalid, 'eval')]
model1 = xgb.train(param, dtrain, 767, watchlist , verbose_eval=0)
ypred0 = model0.predict(dvalid)
ypred1 = model1.predict(dvalid)
vl['ypred0'] = ypred0
vl['ypred1'] = ypred1
feats = ['Province_State','Country_Region','Date']
for day in valid_days:
tr, vl = create_features(train.copy() , day, lag=2)
dvalid = xgb.DMatrix(vl[features])
ypred0 = model0.predict(dvalid)
ypred1 = model1.predict(dvalid)
vl['ypred0'] = ypred0
vl['ypred1'] = ypred1
train[ 'ypred0' ] = pd.merge(train[feats], vl[feats+['ypred0']], on=feats, how='left')['ypred0'].values
train.loc[ train.ypred0<0, 'ypred0'] = 0
train.loc[ train.ypred0.notnull() , 'target0'] = train.loc[ train.ypred0.notnull() , 'ypred0']
train[ 'ypred1' ] = pd.merge(train[feats], vl[feats+['ypred1']], on=feats, how='left')['ypred1'].values
train.loc[ train.ypred1<0, 'ypred1'] = 0
train.loc[ train.ypred1.notnull() , 'target1'] = train.loc[ train.ypred1.notnull() , 'ypred1']
px = np.where(( train.Date==day)) [0]
print(day, rmse(train['log0'].iloc[px], train['target0'].iloc[px]), rmse(train['log1'].iloc[px], train['target1'].iloc[px]))
VALID = train.loc[(train.Date>=valid_days[0])&(train.Date<=valid_days[-1])].copy()
del VALID['ypred0'],VALID['ypred1']
sc0 = rmse(VALID['log0'], VALID['target0'])
sc1 = rmse(VALID['log1'], VALID['target1'])
print(sc0, sc1,(sc0+sc1)/2)
return VALID.copy()
VALID0 = train_period(train, valid_days=list(pd.date_range('2020-03-13', '2020-03-31').strftime('%Y-%m-%d')), lag=1, seed=1)
VALID1 = train_period(train, valid_days=list(pd.date_range('2020-03-16', '2020-03-31').strftime('%Y-%m-%d')), lag=1, seed=1)
VALID2 = train_period(train, valid_days=list(pd.date_range('2020-03-19', '2020-03-31').strftime('%Y-%m-%d')), lag=1, seed=1)
VALID3 = train_period(train, valid_days=list(pd.date_range('2020-03-22', '2020-03-31').strftime('%Y-%m-%d')), lag=1, seed=1)
sa0 = rmse(VALID0['log0'], VALID0['target0'])
sa1 = rmse(VALID1['log0'], VALID1['target0'])
sa2 = rmse(VALID2['log0'], VALID2['target0'])
sa3 = rmse(VALID3['log0'], VALID3['target0'])
sb0 = rmse(VALID0['log1'], VALID0['target1'])
sb1 = rmse(VALID1['log1'], VALID1['target1'])
sb2 = rmse(VALID2['log1'], VALID2['target1'])
sb3 = rmse(VALID3['log1'], VALID3['target1'])
print('13-31: ' + str(sa0)[:6] + ', ' + str(sb0)[:6] + ' = ' + str(0.5*sa0+0.5*sb0)[:6])
print('16-31: ' + str(sa1)[:6] + ', ' + str(sb1)[:6] + ' = ' + str(0.5*sa1+0.5*sb1)[:6])
print('19-31: ' + str(sa2)[:6] + ', ' + str(sb2)[:6] + ' = ' + str(0.5*sa2+0.5*sb2)[:6])
print('22-31: ' + str(sa3)[:6] + ', ' + str(sb3)[:6] + ' = ' + str(0.5*sa3+0.5*sb3)[:6])
print('Avg: ',(sa0+sb0+sa1+sb1+sa2+sb2+sa3+sb3)/ 8)
TEST = train_period(train, valid_days=list(pd.date_range('2020-04-01', '2020-04-30').strftime('%Y-%m-%d')), lag=1, seed=1)
VALID2_sub = VALID2.copy()
VALID2_sub['target0'] = np.log1p(VALID2_sub['ConfirmedCases'])
VALID2_sub['target1'] = np.log1p(VALID2_sub['Fatalities'])
sub = pd.concat((VALID2_sub, TEST.loc[TEST.Date >= '2020-04-01']))
sub = sub[['ForecastId','target0','target1']]
sub.columns = ['ForecastId','ConfirmedCases','Fatalities']
sub['ForecastId'] = sub['ForecastId'].astype(int)
sub['ConfirmedCases'] = np.expm1(sub['ConfirmedCases'])
sub['Fatalities'] = np.expm1(sub['Fatalities'])
print(sub.describe())
VALID0.to_csv('fold-13-31.csv', index=False)
VALID1.to_csv('fold-16-31.csv', index=False)
VALID2.to_csv('fold-19-31.csv', index=False)
VALID3.to_csv('fold-22-31.csv', index=False)
TEST.to_csv('fold-submission.csv', index=False)
return sub
<load_from_csv> | appencoder = LabelEncoder().fit(appevents.app_id)
appevents['app'] = appencoder.transform(appevents.app_id)
napps = len(appencoder.classes_)
deviceapps = (appevents.merge(events[['device_id']], how='left', left_on='event_id', right_index=True)
.groupby(['device_id','app'])['app'].agg(['size'])
.merge(gatrain[['trainrow']], how='left', left_index=True, right_index=True)
.merge(gatest[['testrow']], how='left', left_index=True, right_index=True)
.reset_index())
deviceapps.head() | TalkingData Mobile User Demographics |
90,625 | def get_nn_sub() :
df = pd.read_csv(".. /input/covid19-global-forecasting-week-2/train.csv")
sub_df = pd.read_csv(".. /input/covid19-global-forecasting-week-2/test.csv")
coo_df = pd.read_csv(".. /input/covid19week1/train.csv" ).rename(columns={"Country/Region": "Country_Region"})
coo_df = coo_df.groupby("Country_Region")[["Lat", "Long"]].mean().reset_index()
coo_df = coo_df[coo_df["Country_Region"].notnull() ]
loc_group = ["Province_State", "Country_Region"]
def preprocess(df):
df["Date"] = df["Date"].astype("datetime64[ms]")
df["days"] =(df["Date"] - pd.to_datetime("2020-01-01")).dt.days
df["weekend"] = df["Date"].dt.dayofweek//5
df = df.merge(coo_df, how="left", on="Country_Region")
df["Lat"] =(df["Lat"] // 30 ).astype(np.float32 ).fillna(0)
df["Long"] =(df["Long"] // 60 ).astype(np.float32 ).fillna(0)
for col in loc_group:
df[col].fillna("none", inplace=True)
return df
df = preprocess(df)
sub_df = preprocess(sub_df)
print(df.shape)
TARGETS = ["ConfirmedCases", "Fatalities"]
for col in TARGETS:
df[col] = np.log1p(df[col])
NUM_SHIFT = 5
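# Build NUM_SHIFT autoregressive features: the previous 1..5 days of each (log1p) target per location.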
features = ["Lat", "Long"]
for s in range(1, NUM_SHIFT+1):
for col in TARGETS:
df["prev_{}_{}".format(col, s)] = df.groupby(loc_group)[col].shift(s)
features.append("prev_{}_{}".format(col, s))
df = df[df["Date"] >= df["Date"].min() + timedelta(days=NUM_SHIFT)].copy()
TEST_FIRST = sub_df["Date"].min()
TEST_DAYS =(df["Date"].max() - TEST_FIRST ).days + 1
dev_df, test_df = df[df["Date"] < TEST_FIRST].copy() , df[df["Date"] >= TEST_FIRST].copy()
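# nn_block: Dense -> Activation -> Dropout, reused below to assemble a small gated MLP.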
def nn_block(input_layer, size, dropout_rate, activation):
out_layer = KL.Dense(size, activation=None )(input_layer)
out_layer = KL.Activation(activation )(out_layer)
out_layer = KL.Dropout(dropout_rate )(out_layer)
return out_layer
def get_model() :
inp = KL.Input(shape=(len(features),))
hidden_layer = nn_block(inp, 64, 0.0, "relu")
gate_layer = nn_block(hidden_layer, 32, 0.0, "sigmoid")
hidden_layer = nn_block(hidden_layer, 32, 0.0, "relu")
hidden_layer = KL.multiply([hidden_layer, gate_layer])
out = KL.Dense(len(TARGETS), activation="linear" )(hidden_layer)
model = tf.keras.models.Model(inputs=[inp], outputs=out)
return model
get_model().summary()
def get_input(df):
return [df[features]]
NUM_MODELS = 10
def train_models(df, save=False):
models = []
for i in range(NUM_MODELS):
model = get_model()
model.compile(loss="mean_squared_error", optimizer=Nadam(lr=1e-4))
hist = model.fit(get_input(df), df[TARGETS],
batch_size=2048, epochs=500, verbose=0, shuffle=True)
if save:
model.save_weights("model{}.h5".format(i))
models.append(model)
return models
models = train_models(dev_df)
prev_targets = ['prev_ConfirmedCases_1', 'prev_Fatalities_1']
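# predict_one averages the ensemble, floors predictions at the previous day's values
# (cumulative counts cannot decrease), and clips the log-scale output at 15.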
def predict_one(df, models):
pred = np.zeros(( df.shape[0], 2))
for model in models:
pred += model.predict(get_input(df)) /len(models)
pred = np.maximum(pred, df[prev_targets].values)
pred[:, 0] = np.log1p(np.expm1(pred[:, 0])+ 0.1)
pred[:, 1] = np.log1p(np.expm1(pred[:, 1])+ 0.01)
return np.clip(pred, None, 15)
print([mean_squared_error(dev_df[TARGETS[i]], predict_one(dev_df, models)[:, i])for i in range(len(TARGETS)) ])
def rmse(y_true, y_pred):
return np.sqrt(mean_squared_error(y_true, y_pred))
def evaluate(df):
error = 0
for col in TARGETS:
error += rmse(df[col].values, df["pred_{}".format(col)].values)
return np.round(error/len(TARGETS), 5)
def predict(test_df, first_day, num_days, models, val=False):
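# Recursive multi-day forecast: each day's predictions are fed back in as the next day's lag features.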
temp_df = test_df.loc[test_df["Date"] == first_day].copy()
y_pred = predict_one(temp_df, models)
for i, col in enumerate(TARGETS):
test_df["pred_{}".format(col)] = 0
test_df.loc[test_df["Date"] == first_day, "pred_{}".format(col)] = y_pred[:, i]
print(first_day, np.isnan(y_pred ).sum() , y_pred.min() , y_pred.max())
if val:
print(evaluate(test_df[test_df["Date"] == first_day]))
y_prevs = [None]*NUM_SHIFT
for i in range(1, NUM_SHIFT):
y_prevs[i] = temp_df[['prev_ConfirmedCases_{}'.format(i), 'prev_Fatalities_{}'.format(i)]].values
for d in range(1, num_days):
date = first_day + timedelta(days=d)
print(date, np.isnan(y_pred ).sum() , y_pred.min() , y_pred.max())
temp_df = test_df.loc[test_df["Date"] == date].copy()
temp_df[prev_targets] = y_pred
for i in range(2, NUM_SHIFT+1):
temp_df[['prev_ConfirmedCases_{}'.format(i), 'prev_Fatalities_{}'.format(i)]] = y_prevs[i-1]
y_pred, y_prevs = predict_one(temp_df, models), [None, y_pred] + y_prevs[1:-1]
for i, col in enumerate(TARGETS):
test_df.loc[test_df["Date"] == date, "pred_{}".format(col)] = y_pred[:, i]
if val:
print(evaluate(test_df[test_df["Date"] == date]))
return test_df
test_df = predict(test_df, TEST_FIRST, TEST_DAYS, models, val=True)
print(evaluate(test_df))
for col in TARGETS:
test_df[col] = np.expm1(test_df[col])
test_df["pred_{}".format(col)] = np.expm1(test_df["pred_{}".format(col)])
models = train_models(df, save=True)
sub_df_public = sub_df[sub_df["Date"] <= df["Date"].max() ].copy()
sub_df_private = sub_df[sub_df["Date"] > df["Date"].max() ].copy()
pred_cols = ["pred_{}".format(col)for col in TARGETS]
sub_df_public = sub_df_public.merge(test_df[["Date"] + loc_group + TARGETS], how="left", on=["Date"] + loc_group)
SUB_FIRST = sub_df_private["Date"].min()
SUB_DAYS =(sub_df_private["Date"].max() - sub_df_private["Date"].min() ).days + 1
sub_df_private = df.append(sub_df_private, sort=False)
for s in range(1, NUM_SHIFT+1):
for col in TARGETS:
sub_df_private["prev_{}_{}".format(col, s)] = sub_df_private.groupby(loc_group)[col].shift(s)
sub_df_private = sub_df_private[sub_df_private["Date"] >= SUB_FIRST].copy()
sub_df_private = predict(sub_df_private, SUB_FIRST, SUB_DAYS, models)
for col in TARGETS:
sub_df_private[col] = np.expm1(sub_df_private["pred_{}".format(col)])
sub_df = sub_df_public.append(sub_df_private, sort=False)
sub_df["ForecastId"] = sub_df["ForecastId"].astype(np.int16)
return sub_df[["ForecastId"] + TARGETS]<find_best_params> | d = deviceapps.dropna(subset=['trainrow'])
Xtr_app = csr_matrix(( np.ones(d.shape[0]),(d.trainrow, d.app)) ,
shape=(gatrain.shape[0],napps))
d = deviceapps.dropna(subset=['testrow'])
Xte_app = csr_matrix(( np.ones(d.shape[0]),(d.testrow, d.app)) ,
shape=(gatest.shape[0],napps))
print('Apps data: train shape {}, test shape {}'.format(Xtr_app.shape, Xte_app.shape)) | TalkingData Mobile User Demographics |
90,625 | sub1 = giba_model()<sort_values> | applabels = applabels.loc[applabels.app_id.isin(appevents.app_id.unique())]
applabels['app'] = appencoder.transform(applabels.app_id)
labelencoder = LabelEncoder().fit(applabels.label_id)
applabels['label'] = labelencoder.transform(applabels.label_id)
nlabels = len(labelencoder.classes_ ) | TalkingData Mobile User Demographics |
90,625 | sub1.sort_values("ForecastId", inplace=True)
sub2.sort_values("ForecastId", inplace=True )<compute_test_metric> | applabels = applabels.loc[applabels.app_id.isin(appevents.app_id.unique())]
applabels['app'] = appencoder.transform(applabels.app_id)
labelencoder = LabelEncoder().fit(applabels.label_id)
applabels['label'] = labelencoder.transform(applabels.label_id)
nlabels = len(labelencoder.classes_ ) | TalkingData Mobile User Demographics |
90,625 | TARGETS = ["ConfirmedCases", "Fatalities"]
[np.sqrt(mean_squared_error(np.log1p(sub1[t].values), np.log1p(sub2[t].values))) for t in TARGETS]<save_to_csv> | devicelabels = (deviceapps[['device_id','app']]
.merge(applabels[['app','label']])
.groupby(['device_id','label'])['app'].agg(['size'])
.merge(gatrain[['trainrow']], how='left', left_index=True, right_index=True)
.merge(gatest[['testrow']], how='left', left_index=True, right_index=True)
.reset_index())
devicelabels.head() | TalkingData Mobile User Demographics |
90,625 | sub_df = sub1.copy()
for t in TARGETS:
sub_df[t] = np.expm1(np.log1p(sub1[t].values)*0.5 + np.log1p(sub2[t].values)*0.5)
sub_df.to_csv("submission.csv", index=False )<compute_test_metric> | d = devicelabels.dropna(subset=['trainrow'])
Xtr_label = csr_matrix(( np.ones(d.shape[0]),(d.trainrow, d.label)) ,
shape=(gatrain.shape[0],nlabels))
d = devicelabels.dropna(subset=['testrow'])
Xte_label = csr_matrix(( np.ones(d.shape[0]),(d.testrow, d.label)) ,
shape=(gatest.shape[0],nlabels))
print('Labels data: train shape {}, test shape {}'.format(Xtr_label.shape, Xte_label.shape)) | TalkingData Mobile User Demographics |
90,625 | def RMSLE(pred,actual):
return np.sqrt(np.mean(np.power((np.log(pred+1) - np.log(actual+1)), 2)))<load_from_csv> | Xtrain = hstack((Xtr_brand, Xtr_model, Xtr_app, Xtr_label), format='csr')
Xtest = hstack(( Xte_brand, Xte_model, Xte_app, Xte_label), format='csr')
print('All features: train shape {}, test shape {}'.format(Xtrain.shape, Xtest.shape)) | TalkingData Mobile User Demographics |
90,625 | pd.set_option('mode.chained_assignment', None)
test = pd.read_csv("../input/covid19-global-forecasting-week-2/test.csv")
train = pd.read_csv("../input/covid19-global-forecasting-week-2/train.csv")
train['Province_State'].fillna('', inplace=True)
test['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region','Province_State','Date'])
test = test.sort_values(['Country_Region','Province_State','Date'] )<feature_engineering> | Xtrain = hstack(( Xtr_brand, Xtr_model, Xtr_app, Xtr_label), format='csr')
Xtest = hstack(( Xte_brand, Xte_model, Xte_app, Xte_label), format='csr')
print('All features: train shape {}, test shape {}'.format(Xtrain.shape, Xtest.shape)) | TalkingData Mobile User Demographics |
90,625 | feature_day = [1,20,50,100,200,500,1000]
def CreateInput(data):
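# For each threshold in feature_day, count the days elapsed since the location first reached that many cases.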
feature = []
for day in feature_day:
data.loc[:,'Number day from ' + str(day)+ ' case'] = 0
if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0):
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max()
else:
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min()
for i in range(0, len(data)) :
if(data['Date'].iloc[i] > fromday):
day_denta = data['Date'].iloc[i] - fromday
data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days
feature = feature + ['Number day from ' + str(day)+ ' case']
return data[feature]
pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique() :
for province in train[(train['Country_Region'] == country)]['Province_State'].unique() :
df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]
df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
X_train = CreateInput(df_train)
y_train_confirmed = df_train['ConfirmedCases'].ravel()
y_train_fatalities = df_train['Fatalities'].ravel()
X_pred = CreateInput(df_test)
for day in sorted(feature_day,reverse = True):
feature_use = 'Number day from ' + str(day)+ ' case'
idx = X_train[X_train[feature_use] == 0].shape[0]
if(X_train[X_train[feature_use] > 0].shape[0] >= 20):
break
adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
adjusted_y_train_confirmed = y_train_confirmed[idx:]
adjusted_y_train_fatalities = y_train_fatalities[idx:]
idx = X_pred[X_pred[feature_use] == 0].shape[0]
adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max()
min_test_date = pred_data['Date'].min()
model = SARIMAX(adjusted_y_train_confirmed, order=(1,1,0),
measurement_error=True ).fit(disp=False)
y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values
y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0)
model = SARIMAX(adjusted_y_train_fatalities, order=(1,1,0),
measurement_error=True ).fit(disp=False)
y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values
y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0)
pred_data['ConfirmedCases_hat'] = y_hat_confirmed
pred_data['Fatalities_hat'] = y_hat_fatalities
pred_data_all = pred_data_all.append(pred_data)
df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0
df_val_2 = df_val.copy()<compute_test_metric> | targetencoder = LabelEncoder().fit(gatrain.group)
y = targetencoder.transform(gatrain.group)
nclasses = len(targetencoder.classes_ ) | TalkingData Mobile User Demographics |
90,625 | method_list = ['SARIMA']
method_val = [df_val_2]
for i in range(0,1):
df_val = method_val[i]
method_score = [method_list[i]] + [RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values,df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values)] + [RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values,df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values)]
print(method_score)<save_to_csv> | def score(clf, random_state=0):
    kf = StratifiedKFold(y, n_folds=5, shuffle=True, random_state=random_state)
    pred = np.zeros((y.shape[0], nclasses))
    for itrain, itest in kf:
        Xtr, Xte = Xtrain[itrain, :], Xtrain[itest, :]
        ytr, yte = y[itrain], y[itest]
        clf.fit(Xtr, ytr)
        pred[itest, :] = clf.predict_proba(Xte)
        # Return after the first fold only, to keep kernel runtime down;
        # the remaining folds and the lines below are intentionally skipped.
        return log_loss(yte, pred[itest, :])
        print("{:.5f}".format(log_loss(yte, pred[itest, :])), end=' ')
    print('')
    return log_loss(y, pred) | TalkingData Mobile User Demographics |
90,625 | df_val = df_val_2
submission = df_val[['ForecastId','ConfirmedCases_hat','Fatalities_hat']]
submission.columns = ['ForecastId','ConfirmedCases','Fatalities']
submission.to_csv('submission.csv', index=False)
submission<import_modules> | score(LogisticRegression(C=0.02)) | TalkingData Mobile User Demographics |
90,625 | import os
import numpy as np
import pandas as pd
from scipy.optimize.minpack import curve_fit<load_from_csv> | score(LogisticRegression(C=0.02, multi_class='multinomial',solver='lbfgs')) | TalkingData Mobile User Demographics |
90,625 | def load_kaggle_csv(dataset: str)-> pd.DataFrame:
df = pd.read_csv(f"/kaggle/input/covid19-global-forecasting-week-2/{dataset}.csv", parse_dates=["Date"])
df["Province_State"].fillna("", inplace=True)
df["DayOfYear"] = df["Date"].dt.dayofyear
df["Date"] = df["Date"].dt.date
return df<load_from_csv> | clf = LogisticRegression(C=0.02, multi_class='multinomial',solver='lbfgs')
clf.fit(Xtrain, y)
pred = pd.DataFrame(clf.predict_proba(Xtest), index = gatest.index, columns=targetencoder.classes_)
pred.head() | TalkingData Mobile User Demographics |
90,625 | <train_model><EOS> | pred.to_csv('logreg_subm.csv',index=True ) | TalkingData Mobile User Demographics |
5,652,045 | submission = pd.DataFrame()
submission["ForecastId"] = np.array(test["ForecastId"])
submission["ConfirmedCases"] = np.array(conf_list)
submission["Fatalities"] = np.array(fat_list )<choose_model_class> | !pip install keras==2.2.4
| Diabetic Retinopathy Detection |
5,652,045 | model_conf2 = xgb.XGBRegressor(base_score=0.5, booster=None, colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=1, gamma=0, gpu_id=-1,
importance_type='gain', interaction_constraints=None,
learning_rate=0.3, max_delta_step=0, max_depth=19,
min_child_weight=1, monotone_constraints=None,
n_estimators=1000, n_jobs=0, num_parallel_tree=1,
objective='reg:squarederror', random_state=0, reg_alpha=0,
reg_lambda=1, scale_pos_weight=1, subsample=1, tree_method=None,
validate_parameters=False, verbosity=None)
model_fat2 = xgb.XGBRegressor(base_score=0.5, booster=None, colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=1, gamma=0, gpu_id=-1,
importance_type='gain', interaction_constraints=None,
learning_rate=0.3, max_delta_step=0, max_depth=18,
min_child_weight=1, monotone_constraints=None,
n_estimators=1000, n_jobs=0, num_parallel_tree=1,
objective='reg:squarederror', random_state=0, reg_alpha=0,
reg_lambda=1, scale_pos_weight=1, subsample=1, tree_method=None,
validate_parameters=False, verbosity=None )<prepare_x_and_y> | class Length(layers.Layer):
def call(self, inputs, **kwargs):
return K.sqrt(K.sum(K.square(inputs), -1))
def compute_output_shape(self, input_shape):
return input_shape[:-1] | Diabetic Retinopathy Detection |
5,652,045 | def train_function2() :
train_tmp = pd.DataFrame(train)
y_conf = train_tmp["ConfirmedCases"]
y_fat = train_tmp["Fatalities"]
X_fat = train_tmp.drop(["Fatalities"],axis=1)
X_conf = train_tmp.drop(["ConfirmedCases"],axis=1)
model_fat2.fit(X_fat,y_fat)
model_conf2.fit(X_conf,y_conf )<predict_on_test> | class Mask(layers.Layer):
def call(self, inputs, **kwargs):
if type(inputs)is list:
assert len(inputs)== 2
inputs, mask = inputs
else:
x = inputs
x =(x - K.max(x, 1, True)) / K.epsilon() + 1
mask = K.clip(x, 0, 1)
inputs_masked = K.batch_dot(inputs, mask, [1, 1])
return inputs_masked
def compute_output_shape(self, input_shape):
if type(input_shape[0])is tuple:
return tuple([None, input_shape[0][-1]])
else:
return tuple([None, input_shape[-1]] ) | Diabetic Retinopathy Detection |
5,652,045 | def test_fat2(conf_list):
test_tmp = test.drop(["ForecastId"],axis=1)
test_tmp["ConfirmedCases"] = np.array(conf_list)
pr = model_fat2.predict(test_tmp)
tmp_pr = []
for i in pr:
if i < 0:
tmp_pr.append(0)
continue
tmp_pr.append(int(i))
pr_fat = tmp_pr
for i in range(1,len(tmp_pr)) :
if tmp_pr[i] < tmp_pr[i-1]:
tmp_pr[i] = tmp_pr[i-1]
return tmp_pr<predict_on_test> | def squash(vectors, axis=-1):
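# Squashing non-linearity: shrinks each capsule vector's norm into [0, 1) while preserving its direction.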
s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)
scale = s_squared_norm /(1 + s_squared_norm)/ K.sqrt(s_squared_norm)
return scale * vectors | Diabetic Retinopathy Detection |
5,652,045 | def test_conf2(fat_list):
test_tmp = test.drop(["ForecastId"],axis=1)
test_tmp["Fatalities"] = np.array(fat_list)
pr = model_conf2.predict(test_tmp)
tmp_pr = []
for i in pr:
if i < 0:
tmp_pr.append(0)
continue
tmp_pr.append(int(i))
pr_conf = tmp_pr
for i in range(1,len(tmp_pr)) :
if tmp_pr[i] < tmp_pr[i-1]:
tmp_pr[i] = tmp_pr[i-1]
return tmp_pr<concatenate> | class CapsuleLayer(layers.Layer):
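# Capsule layer with dynamic routing (Sabour et al., 2017); the routing logits live in a non-trainable 'bias' weight.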
def __init__(self, num_capsule, dim_vector, num_routing=3,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
**kwargs):
super(CapsuleLayer, self ).__init__(**kwargs)
self.num_capsule = num_capsule
self.dim_vector = dim_vector
self.num_routing = num_routing
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape):
self.input_num_capsule = input_shape[1]
self.input_dim_vector = input_shape[2]
self.W = self.add_weight(shape=[self.input_num_capsule, self.num_capsule, self.input_dim_vector, self.dim_vector],
initializer=self.kernel_initializer,
name='W')
self.bias = self.add_weight(shape=[1, self.input_num_capsule, self.num_capsule, 1, 1],
initializer=self.bias_initializer,
name='bias',
trainable=False)
self.built = True
def call(self, inputs, training=None):
inputs_expand = K.expand_dims(K.expand_dims(inputs, 2), 2)
inputs_tiled = K.tile(inputs_expand, [1, 1, self.num_capsule, 1, 1])
inputs_hat = tf.scan(lambda ac, x: K.batch_dot(x, self.W, [3, 2]),
elems=inputs_tiled,
initializer=K.zeros([self.input_num_capsule, self.num_capsule, 1, self.dim_vector]))
assert self.num_routing > 0, 'The num_routing should be > 0.'
for i in range(self.num_routing):
c = tf.nn.softmax(self.bias, dim=2)
outputs = squash(K.sum(c * inputs_hat, 1, keepdims=True))
if i != self.num_routing - 1:
self.bias += K.sum(inputs_hat * outputs, -1, keepdims=True)
return K.reshape(outputs, [-1, self.num_capsule, self.dim_vector])
def compute_output_shape(self, input_shape):
return tuple([None, self.num_capsule, self.dim_vector] ) | Diabetic Retinopathy Detection |
5,652,045 | conf_list2 = test_conf2(fat_list)
fat_list2 = test_fat(conf_list )<create_dataframe> | def PrimaryCap(inputs, dim_vector, n_channels, kernel_size, strides, padding):
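# A Conv2D with dim_vector*n_channels filters, reshaped into capsule vectors and squashed.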
output = layers.Conv2D(filters=dim_vector*n_channels, kernel_size=kernel_size, strides=strides, padding=padding )(inputs)
outputs = layers.Reshape(target_shape=[-1, dim_vector] )(output)
return layers.Lambda(squash )(outputs ) | Diabetic Retinopathy Detection |
5,652,045 | submission2 = pd.DataFrame()
submission2["ForecastId"] = np.array(test["ForecastId"])
submission2["ConfirmedCases"] = np.array(conf_list2)
submission2["Fatalities"] = np.array(fat_list2 )<save_to_csv> | def CapsNet(input_shape, n_class, num_routing):
x = layers.Input(shape=input_shape)
conv1 = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1' )(x)
primarycaps = PrimaryCap(conv1, dim_vector=8, n_channels=32, kernel_size=9, strides=2, padding='valid')
digitcaps = CapsuleLayer(num_capsule=n_class, dim_vector=16, num_routing=num_routing, name='digitcaps' )(primarycaps)
out_caps = Length(name='out_caps' )(digitcaps)
y = layers.Input(shape=(n_class,))
masked = Mask()([digitcaps, y])
x_recon = layers.Dense(512, activation='relu' )(masked)
x_recon = layers.Dense(1024, activation='relu' )(x_recon)
x_recon = layers.Dense(width*breadth*3, activation='sigmoid' )(x_recon)
x_recon = layers.Reshape(target_shape=[width, breadth, 3], name='out_recon' )(x_recon)
return models.Model([x, y], [out_caps, x_recon] ) | Diabetic Retinopathy Detection |
5,652,045 | submission2.to_csv("submission.csv",index = False )<load_from_csv> | def margin_loss(y_true, y_pred):
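# CapsNet margin loss: m+ = 0.9, m- = 0.1, with absent classes down-weighted by 0.5.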
L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
0.5 *(1 - y_true)* K.square(K.maximum(0., y_pred - 0.1))
return K.mean(K.sum(L, 1)) | Diabetic Retinopathy Detection |
5,652,045 | pd.set_option('mode.chained_assignment', None)
test = pd.read_csv("../input/covid19-global-forecasting-week-2/test.csv")
train = pd.read_csv("../input/covid19-global-forecasting-week-2/train.csv")<data_type_conversions> | width, breadth = 32, 32
5,652,045 | train['Province_State'].fillna('', inplace=True)
test['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region','Province_State','Date'])
test = test.sort_values(['Country_Region','Province_State','Date'] )<compute_test_metric> | Diabetic Retinopathy Detection |
|
5,652,045 | def RMSLE(pred,actual):
return np.sqrt(np.mean(np.power((np.log(pred+1) - np.log(actual+1)), 2)))<feature_engineering> | trainCSV = pd.read_csv('../input/diabetic-retinopathy-detection/trainLabels.csv')
trainCSV['PatientId'] = trainCSV['image'].map(lambda x: x.split('_')[0])
trainCSV['imagePath'] = trainCSV['image'].map(lambda x: os.path.join('../input/diabetic-retinopathy-detection/', '{}.jpeg'.format(x)))
trainCSV['exists'] = trainCSV['imagePath'].map(os.path.exists)
trainCSV['leftorright'] = trainCSV['image'].map(lambda x: 'left' if x.split('_')[-1]=='left' else 'right')
trainCSV['label'] = trainCSV['level'].map(lambda x: to_categorical(x, 5))
trainCSV.dropna(inplace = True)
trainCSV = trainCSV[trainCSV['exists']] | Diabetic Retinopathy Detection |
5,652,045 | feature_day = [1,20,50,100,200,500,1000]
def CreateInput(data):
feature = []
for day in feature_day:
data.loc[:,'Number day from ' + str(day)+ ' case'] = 0
if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0):
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max()
else:
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min()
for i in range(0, len(data)) :
if(data['Date'].iloc[i] > fromday):
day_denta = data['Date'].iloc[i] - fromday
data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days
feature = feature + ['Number day from ' + str(day)+ ' case']
return data[feature]<prepare_x_and_y> | from PIL import Image
import time
import sys | Diabetic Retinopathy Detection |
5,652,045 | pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique() :
for province in train[(train['Country_Region'] == country)]['Province_State'].unique() :
df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]
df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
X_train = CreateInput(df_train)
y_train_confirmed = df_train['ConfirmedCases'].ravel()
y_train_fatalities = df_train['Fatalities'].ravel()
X_pred = CreateInput(df_test)
for day in sorted(feature_day,reverse = True):
feature_use = 'Number day from ' + str(day)+ ' case'
idx = X_train[X_train[feature_use] == 0].shape[0]
if(X_train[X_train[feature_use] > 0].shape[0] >= 10):
break
adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
adjusted_y_train_confirmed = y_train_confirmed[idx:]
adjusted_y_train_fatalities = y_train_fatalities[idx:]
idx = X_pred[X_pred[feature_use] == 0].shape[0]
adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
model = make_pipeline(PolynomialFeatures(2), BayesianRidge())
model.fit(adjusted_X_train,adjusted_y_train_confirmed)
y_hat_confirmed = model.predict(adjusted_X_pred)
model.fit(adjusted_X_train,adjusted_y_train_fatalities)
y_hat_fatalities = model.predict(adjusted_X_pred)
pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
pred_data['ConfirmedCases_hat'] = np.concatenate(( np.repeat(0, len(pred_data)- len(y_hat_confirmed)) , y_hat_confirmed), axis = 0)
pred_data['Fatalities_hat'] = np.concatenate(( np.repeat(float(0), len(pred_data)- len(y_hat_fatalities)) , y_hat_fatalities), axis = 0)
pred_data_all = pred_data_all.append(pred_data )<merge> | def transformImagetoArray(imagePathsList, width=480, breadth=480):
startTime = time.time()
imagesArrayList = []
for imagePath in imagePathsList:
image = np.array(Image.open(imagePath).resize((width, breadth)), float).reshape(width, breadth, 3)
imagesArrayList.append(image)
print("needed_time_for_makingArrays: {}".format(time.time() -startTime)+ "[sec]")
imagesArray = np.asarray(imagesArrayList)
print('imagesArray: {} MB'.format(str(sys.getsizeof(imagesArray)/(10**6))))
return imagesArray | Diabetic Retinopathy Detection |
5,652,045 | df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0<create_dataframe> | x = transformImagetoArray(list(trainCSV['imagePath']), width=width, breadth=breadth)
y = np.asarray(list(trainCSV['label'])) | Diabetic Retinopathy Detection |
5,652,045 | df_val_1 = df_val.copy()<compute_test_metric> | x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2 ) | Diabetic Retinopathy Detection |
5,652,045 | RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values,df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values )<compute_test_metric> | Diabetic Retinopathy Detection |
|
5,652,045 | RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values,df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values )<compute_test_metric> | del x
gc.collect() | Diabetic Retinopathy Detection |
5,652,045 | val_score = []
for country in df_val['Country_Region'].unique() :
df_val_country = df_val[(df_val['Country_Region'] == country)&(df_val['Fatalities'].isnull() == False)]
val_score.append([country, RMSLE(df_val_country['ConfirmedCases'].values,df_val_country['ConfirmedCases_hat'].values),RMSLE(df_val_country['Fatalities'].values,df_val_country['Fatalities_hat'].values)] )<create_dataframe> | print('x train: %s' % str(x_train.shape))
print('x test: %s' % str(x_test.shape))
print('y train: %s' % str(y_train.shape))
print('y test: %s' % str(y_test.shape)) | Diabetic Retinopathy Detection |
5,652,045 | df_val_score = pd.DataFrame(val_score)
df_val_score.columns = ['Country','ConfirmedCases_Scored','Fatalities_Scored']
df_val_score.sort_values('ConfirmedCases_Scored', ascending = False )<groupby> | def train(model, data, epoch_size_frac=1.0, epochs=100, batch_size=64):
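# Trains with margin loss on capsule lengths plus a small (0.0005) MSE reconstruction penalty,
# using shifted-image augmentation and early stopping on validation loss.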
(x_train, y_train),(x_test, y_test)= data
log = callbacks.CSVLogger('log.csv')
checkpoint = callbacks.ModelCheckpoint('weights-{epoch:02d}val_loss-{val_loss}.h5',
save_best_only=True, save_weights_only=False, verbose=1)
lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: 0.001 * np.exp(-epoch / 10.))
early_stopping = callbacks.EarlyStopping(monitor = 'val_loss', min_delta=0, patience = 5, verbose = 1)
model.compile(optimizer='adam',
loss=[margin_loss, 'mse'],
loss_weights=[1., 0.0005],
metrics={'out_caps': 'accuracy'})
def train_generator(x, y, batch_size, shift_fraction=0.) :
train_datagen = ImageDataGenerator(width_shift_range=shift_fraction,
height_shift_range=shift_fraction)
generator = train_datagen.flow(x, y, batch_size=batch_size)
while 1:
x_batch, y_batch = generator.next()
yield([x_batch, y_batch], [y_batch, x_batch])
model.fit_generator(generator=train_generator(x_train, y_train, batch_size, 0.1),
max_queue_size=2,
steps_per_epoch=int(epoch_size_frac*y_train.shape[0] / batch_size),
epochs=epochs,
validation_data=[[x_test, y_test], [y_test, x_test]],
callbacks=[log, checkpoint, lr_decay, early_stopping])
model.save('trained_model.h5')
print("Trained model saved to 'trained_model.h5'")
return model | Diabetic Retinopathy Detection |
5,652,045 | df_val[df_val['Country_Region'] == country].groupby(['Date','Country_Region']).sum().reset_index()<feature_engineering> | train(model=model, data=((x_train, y_train), (x_test[:60], y_test[:60])),
      epoch_size_frac=1) | Diabetic Retinopathy Detection |
5,652,045 | feature_day = [1,20,50,100,200,500,1000]
def CreateInput(data):
feature = []
for day in feature_day:
data.loc[:,'Number day from ' + str(day)+ ' case'] = 0
if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0):
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max()
else:
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min()
for i in range(0, len(data)) :
if(data['Date'].iloc[i] > fromday):
day_denta = data['Date'].iloc[i] - fromday
data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days
feature = feature + ['Number day from ' + str(day)+ ' case']
return data[feature]
pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique() :
for province in train[(train['Country_Region'] == country)]['Province_State'].unique() :
df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]
df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
X_train = CreateInput(df_train)
y_train_confirmed = df_train['ConfirmedCases'].ravel()
y_train_fatalities = df_train['Fatalities'].ravel()
X_pred = CreateInput(df_test)
for day in sorted(feature_day,reverse = True):
feature_use = 'Number day from ' + str(day)+ ' case'
idx = X_train[X_train[feature_use] == 0].shape[0]
if(X_train[X_train[feature_use] > 0].shape[0] >= 10):
break
adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
adjusted_y_train_confirmed = y_train_confirmed[idx:]
adjusted_y_train_fatalities = y_train_fatalities[idx:]
idx = X_pred[X_pred[feature_use] == 0].shape[0]
adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max()
min_test_date = pred_data['Date'].min()
model = ExponentialSmoothing(adjusted_y_train_confirmed, trend = 'additive' ).fit()
y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values
y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0)
model = ExponentialSmoothing(adjusted_y_train_fatalities, trend = 'additive' ).fit()
y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values
y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0)
pred_data['ConfirmedCases_hat'] = y_hat_confirmed
pred_data['Fatalities_hat'] = y_hat_fatalities
pred_data_all = pred_data_all.append(pred_data)
df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0
df_val_2 = df_val.copy()<feature_engineering> | def combine_images(generated_images):
num = generated_images.shape[0]
width = int(np.sqrt(num))
height = int(np.ceil(float(num)/width))
shape = generated_images.shape[1:3]
image = np.zeros(( height*shape[0], width*shape[1]),
dtype=generated_images.dtype)
for index, img in enumerate(generated_images):
i = int(index/width)
j = index % width
image[i*shape[0]:(i+1)*shape[0], j*shape[1]:(j+1)*shape[1]] = \
img[:, :, 0]
return image
def test(model, data):
x_test, y_test = data
y_pred, x_recon = model.predict([x_test, y_test], batch_size=100)
print('-'*50)
print('Test acc:', np.sum(np.argmax(y_pred, 1)== np.argmax(y_test, 1)) /y_test.shape[0])
img = combine_images(np.concatenate([x_test[:50],x_recon[:50]]))
image = img * 255
Image.fromarray(image.astype(np.uint8)).save("real_and_recon.png")
print()
print('Reconstructed images are saved to./real_and_recon.png')
print('-'*50)
plt.imshow(plt.imread("real_and_recon.png",))
plt.show() | Diabetic Retinopathy Detection |
5,652,045 | feature_day = [1,20,50,100,200,500,1000]
def CreateInput(data):
feature = []
for day in feature_day:
data.loc[:,'Number day from ' + str(day)+ ' case'] = 0
if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0):
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max()
else:
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min()
for i in range(0, len(data)) :
if(data['Date'].iloc[i] > fromday):
day_denta = data['Date'].iloc[i] - fromday
data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days
feature = feature + ['Number day from ' + str(day)+ ' case']
return data[feature]
pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique() :
for province in train[(train['Country_Region'] == country)]['Province_State'].unique() :
df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]
df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
X_train = CreateInput(df_train)
y_train_confirmed = df_train['ConfirmedCases'].ravel()
y_train_fatalities = df_train['Fatalities'].ravel()
X_pred = CreateInput(df_test)
for day in sorted(feature_day,reverse = True):
feature_use = 'Number day from ' + str(day)+ ' case'
idx = X_train[X_train[feature_use] == 0].shape[0]
if(X_train[X_train[feature_use] > 0].shape[0] >= 10):
break
adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
adjusted_y_train_confirmed = y_train_confirmed[idx:]
adjusted_y_train_fatalities = y_train_fatalities[idx:]
idx = X_pred[X_pred[feature_use] == 0].shape[0]
adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max()
min_test_date = pred_data['Date'].min()
model = SARIMAX(adjusted_y_train_confirmed, order=(1,1,0),
measurement_error=True ).fit(disp=False)
y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values
y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0)
model = SARIMAX(adjusted_y_train_fatalities, order=(1,1,0),
measurement_error=True ).fit(disp=False)
y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values
y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0)
pred_data['ConfirmedCases_hat'] = y_hat_confirmed
pred_data['Fatalities_hat'] = y_hat_fatalities
pred_data_all = pred_data_all.append(pred_data)
df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0
df_val_3 = df_val.copy()<compute_test_metric> | test(model=model, data=(x_test[:100], y_test[:100])) | Diabetic Retinopathy Detection |
5,652,045 | method_list = ['Poly Bayesian Ridge','Exponential Smoothing','SARIMA']
method_val = [df_val_1,df_val_2,df_val_3]
for i in range(0,3):
df_val = method_val[i]
method_score = [method_list[i]] + [RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values,df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values)] + [RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values,df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values)]
print(method_score )<save_to_csv> | del x_train, x_test, y_train, y_test
gc.collect() | Diabetic Retinopathy Detection |
5,652,045 | df_val = df_val_3
submission = df_val[['ForecastId','ConfirmedCases_hat','Fatalities_hat']]
submission.columns = ['ForecastId','ConfirmedCases','Fatalities']
submission.to_csv('submission.csv', index=False)
submission<import_modules> | def predictandSaveSubmission() :
imagePaths = os.listdir('../input/resized-2015-2019-blindness-detection-images/resized test 15')
predictionList = []
for imagePath in imagePaths:
tmp = [int(imagePath.split('.')[0].split('_')[0]), imagePath.split('.')[0].split('_')[1], imagePath.split('.')[0]]
imagePath = '../input/resized-2015-2019-blindness-detection-images/resized test 15/' + imagePath
imageArray = np.array([np.array(Image.open(imagePath).resize((width, breadth)), float).reshape(width, breadth, 3)])
y_pred, _ = model.predict_on_batch([imageArray, np.zeros(( 1, 5)) ])
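# NOTE: y_pred is computed but never used below; the submitted level is hard-coded to 1.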
tmp += [1]
predictionList.append(tmp)
predictionList.sort(key=lambda x:(x[0], x[1]))
predictionDict = {}
predictionDict['image'] = [i[2] for i in predictionList]
predictionDict['level'] = [i[3] for i in predictionList]
df_submission = pd.DataFrame(predictionDict)
df_submission.to_csv("submission.csv",index=False ) | Diabetic Retinopathy Detection |
5,652,045 | import pandas as pd
from datetime import datetime
from sklearn import preprocessing
from xgboost import XGBRegressor
from sklearn.tree import DecisionTreeRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
<load_from_csv> | predictandSaveSubmission() | Diabetic Retinopathy Detection |
1,662,531 | path = '/kaggle/input/covid19-global-forecasting-week-2/'
df_train = pd.read_csv(path+'train.csv')
df_test = pd.read_csv(path+'test.csv' )<data_type_conversions> | from sklearn.metrics import roc_curve, auc
import xgboost as xgb | Flavours of Physics: Finding τ → μμμ |
1,662,531 | df_train['Date'] = pd.to_datetime(df_train['Date'], infer_datetime_format=True)
df_test['Date'] = pd.to_datetime(df_test['Date'], infer_datetime_format=True )<something_strange> | print('Loading the training/test data using pandas...')
train = pd.read_csv('../input/training.csv')
test = pd.read_csv('../input/test.csv')
check_agreement = pd.read_csv('../input/check_agreement.csv')
check_correlation = pd.read_csv('../input/check_correlation.csv') | Flavours of Physics: Finding τ → μμμ |
1,662,531 | EMPTY_VAL = "EMPTY_VAL"
def fillState(state, country):
if state == EMPTY_VAL: return country
return state<data_type_conversions> | def add_features(df):
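# Physics-motivated features: flight-distance significance, IP ratios, and min/max isolation aggregates.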
df['flight_dist_sig'] = df['FlightDistance']/df['FlightDistanceError']
df['flight_dist_sig2'] =(df['FlightDistance']/df['FlightDistanceError'])**2
df['NEW_IP_dira'] = df['IP']*df['dira']
df['p0p2_ip_ratio']=df['IP']/df['IP_p0p2']
df['p1p2_ip_ratio']=df['IP']/df['IP_p1p2']
df['DCA_MAX'] = df.loc[:, ['DOCAone', 'DOCAtwo', 'DOCAthree']].max(axis=1)
df['iso_bdt_min'] = df.loc[:, ['p0_IsoBDT', 'p1_IsoBDT', 'p2_IsoBDT']].min(axis=1)
df['iso_min'] = df.loc[:, ['isolationa', 'isolationb', 'isolationc','isolationd', 'isolatione', 'isolationf']].min(axis=1)
df['NEW_FD_LT']=df['FlightDistance']/df['LifeTime']
return df | Flavours of Physics: Finding τ → μμμ |
1,662,531 | df_train['Province_State'].fillna(EMPTY_VAL, inplace=True)
df_train['Province_State'] = df_train.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fillState(x['Province_State'], x['Country_Region']), axis=1)
df_train.loc[:, 'Date'] = df_train.Date.dt.strftime("%m%d")
df_train["Date"] = df_train["Date"].astype(int)
print(df_train.shape)
df_train.head()<data_type_conversions> | train = add_features(train)
test = add_features(test)
check_agreement = add_features(check_agreement)
check_correlation = add_features(check_correlation ) | Flavours of Physics: Finding τ → μμμ |
1,662,531 | df_test['Province_State'].fillna(EMPTY_VAL, inplace=True)
df_test['Province_State'] = df_test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fillState(x['Province_State'], x['Country_Region']), axis=1)
df_test.loc[:, 'Date'] = df_test.Date.dt.strftime("%m%d")
df_test["Date"] = df_test["Date"].astype(int)
print(df_test.shape)
df_test.head()<categorify> | filter_out = ['id','min_ANNmuon', 'production', 'mass', 'signal', 'SPDhits', 'isolationb', 'isolationc', 'DOCAone', 'DOCAtwo', 'DOCAthree','CDF1', 'CDF2', 'CDF3']
features = list(f for f in train.columns if f not in filter_out ) | Flavours of Physics: Finding τ → μμμ |
1,662,531 | le = preprocessing.LabelEncoder()
df_train['Country_Region'] = le.fit_transform(df_train['Country_Region'])
df_train['Province_State'] = le.fit_transform(df_train['Province_State'])
df_test['Country_Region'] = le.fit_transform(df_test['Country_Region'])
df_test['Province_State'] = le.fit_transform(df_test['Province_State'] )<choose_model_class> | print('Training XGBoost model...')
model_xgb = xgb.XGBClassifier()
params ={'nthread': 4,
'objective': 'binary:logistic',
'max_depth' : 8,
'min_child_weight': 3,
'learning_rate' : 0.1,
'n_estimators' : 300,
'subsample' : 0.9,
'colsample_bytree' : 0.5,
'silent': 1}
model_xgb.fit(train[features],train.signal ) | Flavours of Physics: Finding τ → μμμ |
1,662,531 | def build_model_1() :
model = RandomForestRegressor(n_estimators = 100, random_state = 0)
return model
def build_model_2() :
model = XGBRegressor(n_estimators=1000)
return model
def build_model_3() :
model = DecisionTreeRegressor(random_state=1)
return model
def build_model_4() :
model = LogisticRegression()
return model
def build_model_5() :
model = LinearRegression()
return model
def build_model_6() :
model = LGBMRegressor(random_state=5)
return model
def build_model_7() :
# 'iterations' is a CatBoost parameter, so CatBoostRegressor (imported above) is presumably intended here
model = CatBoostRegressor(iterations=2)
return model
<create_dataframe> | pred_test = model_xgb.predict_proba(test[features])[:,1]
result = pd.DataFrame({'id': test.id})
result['prediction'] = pred_test | Flavours of Physics: Finding τ → μμμ |
1,662,531 | <prepare_x_and_y><EOS> | result.to_csv('submission_nacho.csv', index=False, sep=',' ) | Flavours of Physics: Finding τ → μμμ |
14,199,366 | tt['ConfirmedCases_Pred1'] = tt[['ConfirmedCases','ConfirmedCases_Pred1']].max(axis=1)
tt['Fatalities_Pred1'] = tt[['Fatalities','Fatalities_Pred1']].max(axis=1)
tt['ConfirmedCases_Pred1'] = tt['ConfirmedCases_Pred1'].fillna(0)
tt['Fatalities_Pred1'] = tt['Fatalities_Pred1'].fillna(0)
tt.loc[~tt['ConfirmedCases'].isna() , 'ConfirmedCases_Pred1'] = tt.loc[~tt['ConfirmedCases'].isna() ]['ConfirmedCases']
tt.loc[~tt['Fatalities'].isna() , 'Fatalities_Pred1'] = tt.loc[~tt['Fatalities'].isna() ]['Fatalities']
tt['ConfirmedCases_Pred1'] = tt.groupby('Place')['ConfirmedCases_Pred1'].transform('cummax')
tt['Fatalities_Pred1'] = tt.groupby('Place')['Fatalities_Pred1'].transform('cummax' )<load_from_csv> | from catboost import CatBoostClassifier | Click-Through Rate Prediction |
14,199,366 | ss = pd.read_csv('../input/covid19-global-forecasting-week-2/submission.csv')<save_to_csv> | file_path = "/kaggle/input/avazu-ctr-prediction/train.gz"
data_df = pd.read_csv(file_path, header=0, nrows=10000000).sample(frac=1) | Click-Through Rate Prediction |
14,199,366 | mysub = tt.dropna(subset=['ForecastId'])[['ForecastId','ConfirmedCases_Pred1','Fatalities_Pred1']]
mysub['ForecastId'] = mysub['ForecastId'].astype('int')
mysub = mysub.rename(columns={'ConfirmedCases_Pred1':'ConfirmedCases',
'Fatalities_Pred1': 'Fatalities'})
mysub.to_csv('submission.csv', index=False )<load_from_csv> | train_count = int(len(data_df)* 0.9 ) | Click-Through Rate Prediction |
14,199,366 | path = '.. /input/covid19-global-forecasting-week-2/'
train = pd.read_csv(path + 'train.csv')
test = pd.read_csv(path + 'test.csv')
sub = pd.read_csv(path + 'submission.csv')
train['Date'] = train['Date'].apply(lambda x:(datetime.datetime.strptime(x, '%Y-%m-%d')))
test['Date'] = test['Date'].apply(lambda x:(datetime.datetime.strptime(x, '%Y-%m-%d')))
train['days'] =(train['Date'].dt.date - train['Date'].dt.date.min() ).dt.days
test['days'] =(test['Date'].dt.date - train['Date'].dt.date.min() ).dt.days
train.loc[train['Province_State'].isnull() , 'Province_State'] = 'N/A'
test.loc[test['Province_State'].isnull() , 'Province_State'] = 'N/A'
train['Area'] = train['Country_Region'] + '_' + train['Province_State']
test['Area'] = test['Country_Region'] + '_' + test['Province_State']
print(train['Date'].max())
print(test['Date'].min())
print(train['days'].max())
N_AREAS = train['Area'].nunique()
AREAS = np.sort(train['Area'].unique())
TRAIN_N = 70
print(train[train['days'] < TRAIN_N]['Date'].max())
train.head()<compute_train_metric> | X_train = data_df.iloc[:train_count,2:]
y_train = data_df.iloc[:train_count,1] | Click-Through Rate Prediction |
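A quick note on the slicing above (commentary, with an assumption about the Avazu file layout): the first two columns of train.gz are expected to be id and click, so iloc position 2 onward holds the raw categorical features and column 1 is the label.

print(list(data_df.columns[:4]))   # expected: ['id', 'click', 'hour', 'C1']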
14,199,366 | def eval1(y, p):
val_len = y.shape[1] - TRAIN_N
return np.sqrt(mean_squared_error(y[:, TRAIN_N:TRAIN_N+val_len].flatten() , p[:, TRAIN_N:TRAIN_N+val_len].flatten()))
def run_c(params, X, test_size=50):
gr_base = []
gr_base_factor = []
x_min = np.ma.MaskedArray(X, X<1)
x_min = x_min.argmin(axis=1)
for i in range(X.shape[0]):
temp = X[i,:]
threshold = np.log(1+params['min cases for growth rate'])
num_days = params['last N days']
if(temp > threshold ).sum() > num_days:
gr_base.append(np.clip(np.diff(temp[temp > threshold])[-num_days:].mean() , 0, params['growth rate max']))
gr_base_factor.append(np.clip(np.diff(np.diff(temp[temp > threshold])) [-num_days:].mean() , -0.2, params["growth rate factor max"]))
else:
gr_base.append(params['growth rate default'])
gr_base_factor.append(params['growth rate factor'])
gr_base = np.array(gr_base)
gr_base_factor = np.array(gr_base_factor)
preds = X.copy()
for i in range(test_size):
delta = np.clip(preds[:, -1], np.log(2), None)+ gr_base *(1 + params['growth rate factor']*(1 + params['growth rate factor factor'])**(i)) **(i)
preds = np.hstack(( preds, delta.reshape(-1,1)))
return preds
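# How run_c extrapolates (commentary): each forecast day appends, in log1p space,
#   new = max(prev, log 2) + gr_base * (1 + factor*(1+ff)**i)**i
# i.e. a per-area growth rate damped over the horizon i. Illustrative arithmetic
# with gr_base=0.25, factor=-0.05, ff=0.005, i=10: the damping term is
# (1 - 0.05*1.005**10)**10 ~ 0.58, so the day-10 step adds about 0.25*0.58 ~ 0.15.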
params = {
"min cases for growth rate": 30,
"last N days": 5,
"growth rate default": 0.25,
"growth rate max": 0.3,
"growth rate factor max": -0.01,
"growth rate factor": -0.05,
"growth rate factor factor": 0.005,
}
# a single-area slice (e.g. train_p_c[train_p_c.index=="China_Qinghai"]) can be
# substituted here when debugging one region
x = train_p_c
preds_c_1 = run_c(params, np.log(1+x.values)[:,:TRAIN_N])
<compute_test_metric> | X_val = data_df.iloc[train_count:,2:]
y_val = data_df.iloc[train_count:,1] | Click-Through Rate Prediction |
14,199,366 | warnings.filterwarnings("ignore")
def f(x, K, a, x0):
return K * x ** a * np.exp(-x/x0)
def eval1(y, p):
val_len = y.shape[1] - TRAIN_N
return np.sqrt(mean_squared_error(y[:, TRAIN_N:TRAIN_N+val_len].flatten() , p[:, TRAIN_N:TRAIN_N+val_len].flatten()))
def run(params, X, X_change, test_size=50):
print(X)
x_mins = np.ma.MaskedArray(X, X<10)
x_mins = x_mins.argmin(axis=1)
print(x_mins)
popts = []
scores = []
for i in tqdm(range(X_change.shape[0])) :
if X[i,:].sum() == 0:
x_mins[i] = X.shape[1] - 1
best_score = 100
best_popt = [1,1,1]
x_min_max = X_change.shape[1]
x_min_max = np.minimum(x_mins[i] + 10, X_change.shape[1])
early_stopping = 5
early_stopping_count = 0
for x_min in np.arange(x_mins[i], x_min_max):
x = np.arange(x_min, X_change.shape[1])- x_min
y = X_change[i, x_min:]
try:
popt, pcov = curve_fit(f, x,y, bounds=(0, [10, 5, np.max(x)]))
except:
popt = [1,1,1]
p = np.zeros(X_change.shape[1])
p[x_min:] = f(x, *popt)
p = np.cumsum(p, axis=0)
score = np.sqrt(mean_squared_error(np.log1p(X[i,:]), np.log1p(p)))
if score < best_score:
best_score = score
best_popt = popt
x_mins[i] = x_min
else:
early_stopping_count += 1
if early_stopping_count >= early_stopping:
continue
popts.append(best_popt)
scores.append(best_score)
preds = X_change.copy()
preds_new = np.zeros(( X.shape[0], test_size))
for i in range(X.shape[0]):
x = np.arange(X.shape[1], test_size+X.shape[1])- x_mins[i]
y = f(x, *popts[i])
y = y[-test_size-X.shape[1]:]
preds_new[i,:] = y
preds = np.hstack(( preds, preds_new))
preds = np.cumsum(preds, axis=1)
return preds, x_mins, scores
x1 = train_p_c
x = train_p_c.values[:,:TRAIN_N]
x_c = train_p_c_change.values[:,:TRAIN_N]
params = {}
preds_c_pl, x_mins, scores = run(params, x, x_c)
preds_c_pl = np.log1p(preds_c_pl)
<prepare_output> | cat_features = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] | Click-Through Rate Prediction |
14,199,366 | preds_c = preds_c_1.copy()
idx = np.where(x_mins<100)
preds_c[idx] = 0.8 * preds_c_1[idx] + 0.2 * preds_c_pl[idx]
for i in range(N_AREAS):
if 'China' in AREAS[i] and preds_c[i, TRAIN_N-1] < np.log(31):
preds_c[i, TRAIN_N:] = preds_c[i, TRAIN_N-1]
<prepare_x_and_y> | model = CatBoostClassifier(
iterations=50,
learning_rate=0.5,
task_type='GPU',
loss_function='Logloss',
) | Click-Through Rate Prediction |
14,199,366 | f_rate =(train_p_f / train_p_c ).fillna(0)
X_c = np.log(1+train_p_c.values)[:,:TRAIN_N]
X_f = train_p_f.values[:,:TRAIN_N]<normalization> | model.fit(
X_train, y_train,
eval_set=(X_val, y_val),
cat_features=cat_features,
verbose=10,
) | Click-Through Rate Prediction |
14,199,366 | def lin_w(sz):
res = np.linspace(0, 1, sz+1, endpoint=False)[1:]
return np.append(res, np.append([1], res[::-1]))
def run_f(params, X_c, X_f, X_f_r, test_size=50):
X_f_r = np.array(np.ma.mean(np.ma.masked_outside(X_f_r, 0.06, 0.4)[:,:], axis=1))
X_f_r = np.clip(X_f_r, params['fatality_rate_lower'], params['fatality_rate_upper'])
X_c = np.clip(np.exp(X_c)-1, 0, None)
preds = X_f.copy()
train_size = X_f.shape[1] - 1
for i in range(test_size):
t_lag = train_size+i-params['length']
t_wsize = 3
delta = np.average(np.diff(X_c, axis=1)[:, t_lag-t_wsize:t_lag+1+t_wsize], axis=1)
delta = params['absolute growth'] + delta * X_f_r
preds = np.hstack(( preds, preds[:, -1].reshape(-1,1)+ delta.reshape(-1,1)))
return preds
params = {
"length": 6,
"absolute growth": 0.02,
"fatality_rate_lower": 0.035,
"fatality_rate_upper": 0.40,
}
preds_f_1 = run_f(params, preds_c, X_f, f_rate.values[:,:TRAIN_N])
preds_f_1 = np.log(1+preds_f_1)
preds_f_1 = np.log1p(0.9*(np.exp(preds_f_1)-1))
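# run_f in brief (commentary): each forecast day's fatality increment is
# 'absolute growth' plus a 7-day windowed mean of new confirmed cases from
# params['length'] days earlier, scaled by a per-area fatality rate clipped to
# [0.035, 0.40]; the last line then shrinks the forecast by 10% in count space.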
<prepare_output> | model.get_feature_importance(prettified=True ) | Click-Through Rate Prediction |
14,199,366 | preds_f = preds_f_1<compute_test_metric> | test_file_path = "/kaggle/input/avazu-ctr-prediction/test.gz"
test_df = pd.read_csv(test_file_path, header=0, dtype=str)
# assumed layout: first column is id, the rest are raw features; split them so
# the feature matrix matches the training slices used above
id_test = test_df.iloc[:, :1]
X_test = test_df.iloc[:, 1:] | Click-Through Rate Prediction |
14,199,366 | if False:
val_len = train_p_c.values.shape[1] - TRAIN_N
for i in range(val_len):
d = i + TRAIN_N
m1 = np.sqrt(mean_squared_error(np.log(1 + train_p_c.values[:, d]), preds_c[:, d]))
m2 = np.sqrt(mean_squared_error(np.log(1 + train_p_f.values[:, d]), preds_f[:, d]))
print(f"{d}: {(m1 + m2)/2:8.5f} [{m1:8.5f} {m2:8.5f}]")
print()
m1 = np.sqrt(mean_squared_error(np.log(1 + train_p_c.values[:, TRAIN_N:TRAIN_N+val_len] ).flatten() , preds_c[:, TRAIN_N:TRAIN_N+val_len].flatten()))
m2 = np.sqrt(mean_squared_error(np.log(1 + train_p_f.values[:, TRAIN_N:TRAIN_N+val_len] ).flatten() , preds_f[:, TRAIN_N:TRAIN_N+val_len].flatten()))
print(f"{(m1 + m2)/2:8.5f} [{m1:8.5f} {m2:8.5f}]" )<categorify> | y_test = model.predict(X_test,
prediction_type='Probability',
ntree_start=0, ntree_end=model.get_best_iteration() ,
thread_count=-1, verbose=None ) | Click-Through Rate Prediction |
14,199,366 | temp = pd.DataFrame(np.clip(np.exp(preds_c)- 1, 0, None))
temp['Area'] = AREAS
temp = temp.melt(id_vars='Area', var_name='days', value_name="ConfirmedCases")
test = test.merge(temp, how='left', left_on=['Area', 'days'], right_on=['Area', 'days'])
temp = pd.DataFrame(np.clip(np.exp(preds_f)- 1, 0, None))
temp['Area'] = AREAS
temp = temp.melt(id_vars='Area', var_name='days', value_name="Fatalities")
test = test.merge(temp, how='left', left_on=['Area', 'days'], right_on=['Area', 'days'])
test.head()<save_to_csv> | id_test.join(pd.DataFrame(y_test)) | Click-Through Rate Prediction |
14,199,366 | test.to_csv("submission.csv", index=False, columns=["ForecastId", "ConfirmedCases", "Fatalities"] )<sort_values> | submission_df = pd.read_csv("/kaggle/input/avazu-ctr-prediction/sampleSubmission.gz" ) | Click-Through Rate Prediction |
14,199,366 | <set_options><EOS> | submission_df["click"] = y_test[:, 1]
submission_df.to_csv("submission.csv", index=False)
submission_df.head() | Click-Through Rate Prediction |
426,677 | <SOS> metric: CategorizationAccuracy Kaggle data source: cdiscount-image-classification-challenge<load_from_csv> | import numpy as np
import pandas as pd
import io
import bson
import matplotlib.pyplot as plt
from skimage.data import imread
from tqdm import tqdm_notebook | Cdiscount’s Image Classification Challenge |
426,677 | train = pd.read_csv(".. /input/covid19-global-forecasting-week-2/train.csv")
test = pd.read_csv(".. /input/covid19-global-forecasting-week-2/test.csv")
tt = pd.concat([train, test], sort=False)
tt = train.merge(test, on=['Province_State','Country_Region','Date'], how='outer')
def name_place(x):
try:
x_new = x['Country_Region'] + "_" + x['Province_State']
except:
x_new = x['Country_Region']
return x_new
tt['Place'] = tt.apply(lambda x: name_place(x), axis=1)
tt['Date'] = pd.to_datetime(tt['Date'])
tt['doy'] = tt['Date'].dt.dayofyear
tt['dow'] = tt['Date'].dt.dayofweek
tt['hasProvidence'] = ~tt['Province_State'].isna()
country_meta = pd.read_csv('.. /input/covid19-forecasting-metadata/region_metadata.csv')
tt = tt.merge(country_meta, how='left')
country_date_meta = pd.read_csv('.. /input/covid19-forecasting-metadata/region_date_metadata.csv')
tt['HasFatality'] = tt.groupby('Place')['Fatalities'].transform(lambda x: x.max() > 0)
tt['HasCases'] = tt.groupby('Place')['ConfirmedCases'].transform(lambda x: x.max() > 0)
first_case_date = tt.query('ConfirmedCases >= 1' ).groupby('Place')['Date'].min().to_dict()
ten_case_date = tt.query('ConfirmedCases >= 10' ).groupby('Place')['Date'].min().to_dict()
hundred_case_date = tt.query('ConfirmedCases >= 100' ).groupby('Place')['Date'].min().to_dict()
first_fatal_date = tt.query('Fatalities >= 1' ).groupby('Place')['Date'].min().to_dict()
ten_fatal_date = tt.query('Fatalities >= 10' ).groupby('Place')['Date'].min().to_dict()
hundred_fatal_date = tt.query('Fatalities >= 100' ).groupby('Place')['Date'].min().to_dict()
tt['First_Case_Date'] = tt['Place'].map(first_case_date)
tt['Ten_Case_Date'] = tt['Place'].map(ten_case_date)
tt['Hundred_Case_Date'] = tt['Place'].map(hundred_case_date)
tt['First_Fatal_Date'] = tt['Place'].map(first_fatal_date)
tt['Ten_Fatal_Date'] = tt['Place'].map(ten_fatal_date)
tt['Hundred_Fatal_Date'] = tt['Place'].map(hundred_fatal_date)
tt['Days_Since_First_Case'] =(tt['Date'] - tt['First_Case_Date'] ).dt.days
tt['Days_Since_Ten_Cases'] =(tt['Date'] - tt['Ten_Case_Date'] ).dt.days
tt['Days_Since_Hundred_Cases'] =(tt['Date'] - tt['Hundred_Case_Date'] ).dt.days
tt['Days_Since_First_Fatal'] =(tt['Date'] - tt['First_Fatal_Date'] ).dt.days
tt['Days_Since_Ten_Fatal'] =(tt['Date'] - tt['Ten_Fatal_Date'] ).dt.days
tt['Days_Since_Hundred_Fatal'] =(tt['Date'] - tt['Hundred_Fatal_Date'] ).dt.days
smoking = pd.read_csv(".. /input/smokingstats/share-of-adults-who-smoke.csv")
smoking = smoking.rename(columns={'Smoking prevalence, total(ages 15+ )(% of adults)': 'Smoking_Rate'})
smoking_dict = smoking.groupby('Entity')['Year'].max().to_dict()
smoking['LastYear'] = smoking['Entity'].map(smoking_dict)
smoking = smoking.query('Year == LastYear' ).reset_index()
smoking['Entity'] = smoking['Entity'].str.replace('United States', 'US')
tt = tt.merge(smoking[['Entity','Smoking_Rate']],
left_on='Country_Region',
right_on='Entity',
how='left',
validate='m:1')\
.drop('Entity', axis=1)
country_info = pd.read_csv('.. /input/countryinfo/covid19countryinfo.csv')
tt = tt.merge(country_info, left_on=['Country_Region','Province_State'],
right_on=['country','region'],
how='left',
validate='m:1')
us_state_info = pd.read_html('https://simple.wikipedia.org/wiki/List_of_U.S._states_by_population')[0] \
[['State','Population estimate, July 1, 2019[2]']] \
.rename(columns={'Population estimate, July 1, 2019[2]' : 'Population'})
tt = tt.merge(us_state_info[['State','Population']],
left_on='Province_State',
right_on='State',
how='left')
tt['pop'] = pd.to_numeric(tt['pop'].str.replace(',',''))
tt['pop'] = tt['pop'].fillna(tt['Population'])
tt['pop'] = pd.to_numeric(tt['pop'])
tt['pop_diff'] = tt['pop'] - tt['Population']
tt['Population_final'] = tt['Population']
tt.loc[~tt['hasProvidence'], 'Population_final'] = tt.loc[~tt['hasProvidence']]['pop']
tt['Confirmed_Cases_Diff'] = tt.groupby('Place')['ConfirmedCases'].diff()
tt['Fatailities_Diff'] = tt.groupby('Place')['Fatalities'].diff()
max_date = tt.dropna(subset=['ConfirmedCases'])['Date'].max()
tt['gdp2019'] = pd.to_numeric(tt['gdp2019'].str.replace(',',''))<define_variables> | categories = pd.read_csv('.. /input/category_names.csv', index_col='category_id' ) | Cdiscount’s Image Classification Challenge |
426,677 | pop_dict = {'Angola': int(29.78 * 10**6),
'Australia_Australian Capital Territory': 423_800,
'Australia_New South Wales': int(7.544 * 10**6),
'Australia_Northern Territory': 244_300,
'Australia_Queensland' : int(5.071 * 10**6),
'Australia_South Australia' : int(1.677 * 10**6),
'Australia_Tasmania': 515_000,
'Australia_Victoria': int(6.359 * 10**6),
'Australia_Western Australia': int(2.589 * 10**6),
'Brazil': int(209.3 * 10**6),
'Canada_Alberta' : int(4.371 * 10**6),
'Canada_British Columbia' : int(5.071 * 10**6),
'Canada_Manitoba' : int(1.369 * 10**6),
'Canada_New Brunswick' : 776_827,
'Canada_Newfoundland and Labrador' : 521_542,
'Canada_Nova Scotia' : 971_395,
'Canada_Ontario' : int(14.57 * 10**6),
'Canada_Prince Edward Island' : 156_947,
'Canada_Quebec' : int(8.485 * 10**6),
'Canada_Saskatchewan': int(1.174 * 10**6),
'China_Anhui': int(62 * 10**6),
'China_Beijing': int(21.54 * 10**6),
'China_Chongqing': int(30.48 * 10**6),
'China_Fujian' : int(38.56 * 10**6),
'China_Gansu' : int(25.58 * 10**6),
'China_Guangdong' : int(113.46 * 10**6),
'China_Guangxi' : int(48.38 * 10**6),
'China_Guizhou' : int(34.75 * 10**6),
'China_Hainan' : int(9.258 * 10**6),
'China_Hebei' : int(74.7 * 10**6),
'China_Heilongjiang' : int(38.31 * 10**6),
'China_Henan' : int(94 * 10**6),
'China_Hong Kong' : int(7.392 * 10**6),
'China_Hubei' : int(58.5 * 10**6),
'China_Hunan' : int(67.37 * 10**6),
'China_Inner Mongolia' : int(24.71 * 10**6),
'China_Jiangsu' : int(80.4 * 10**6),
'China_Jiangxi' : int(45.2 * 10**6),
'China_Jilin' : int(27.3 * 10**6),
'China_Liaoning' : int(43.9 * 10**6),
'China_Macau' : 622_567,
'China_Ningxia' : int(6.301 * 10**6),
'China_Qinghai' : int(5.627 * 10**6),
'China_Shaanxi' : int(37.33 * 10**6),
'China_Shandong' : int(92.48 * 10**6),
'China_Shanghai' : int(24.28 * 10**6),
'China_Shanxi' : int(36.5 * 10**6),
'China_Sichuan' : int(81.1 * 10**6),
'China_Tianjin' : int(15 * 10**6),
'China_Tibet' : int(3.18 * 10**6),
'China_Xinjiang' : int(21.81 * 10**6),
'China_Yunnan' : int(45.97 * 10**6),
'China_Zhejiang' : int(57.37 * 10**6),
'Denmark_Faroe Islands' : 51_783,
'Denmark_Greenland' : 56_171,
'France_French Guiana' : 290_691,
'France_French Polynesia' : 283_007,
'France_Guadeloupe' : 395_700,
'France_Martinique' : 376_480,
'France_Mayotte' : 270_372,
'France_New Caledonia' : 99_926,
'France_Reunion' : 859_959,
'France_Saint Barthelemy' : 9_131,
'France_St Martin' : 32_125,
'Netherlands_Aruba' : 105_264,
'Netherlands_Curacao' : 161_014,
'Netherlands_Sint Maarten' : 41_109,
'Papua New Guinea' : int(8.251 * 10**6),
'US_Guam' : 164_229,
'US_Virgin Islands' : 107_268,
'United Kingdom_Bermuda' : 65_441,
'United Kingdom_Cayman Islands' : 61_559,
'United Kingdom_Channel Islands' : 170_499,
'United Kingdom_Gibraltar' : 34_571,
'United Kingdom_Isle of Man' : 84_287,
'United Kingdom_Montserrat' : 4_922
}
tt['Population_final'] = tt['Population_final'].fillna(tt['Place'].map(pop_dict))<feature_engineering> | df_dict = {
'category': prod_category,
'num_imgs': prod_num_imgs
}
df = pd.DataFrame(df_dict, index=prod_id)
del df_dict | Cdiscount’s Image Classification Challenge |
426,677 | tt.loc[tt['Place'] == 'Diamond Princess', 'Population_final'] = 2_670<feature_engineering> | cat_counts = df.category.value_counts().to_frame()
cat_counts = cat_counts / cat_counts["category"].sum()
print(cat_counts.head())
print(pd.unique(df.category)) | Cdiscount’s Image Classification Challenge |
426,677 | tt['ConfirmedCases_Log'] = tt['ConfirmedCases'].apply(np.log1p)
tt['Fatalities_Log'] = tt['Fatalities'].apply(np.log1p )<feature_engineering> | cat_counts.sort_values(by="category",inplace=True)
bot_5_categories = cat_counts.head()
top_5_categories = cat_counts.tail()
print(bot_5_categories)
print(top_5_categories ) | Cdiscount’s Image Classification Challenge |
426,677 | <remove_duplicates><EOS> | samp_sub_df = pd.read_csv(".. /input/sample_submission.csv")
samp_sub_df["category_id"] = np.random.choice(cat_counts.index,size=len(samp_sub_df),p=cat_counts["category"].values)
samp_sub_df.to_csv("baised_rand_submission.csv", index=False ) | Cdiscount’s Image Classification Challenge |
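A side note on this frequency-weighted random baseline (commentary, toy numbers only): sampling a label with probability p_i gives expected accuracy sum_i p_i**2, which never beats always guessing the modal class (accuracy max_i p_i).

import numpy as np
p = np.array([0.5, 0.3, 0.2])   # hypothetical class frequencies
print((p ** 2).sum())           # 0.38 expected accuracy for p-weighted sampling
print(p.max())                  # 0.50 expected accuracy for the modal guess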
10,553,275 | <SOS> metric: Custom Kaggle data source: halite<categorify> | env = make("halite", debug=True)
env.render() | Halite by Two Sigma |
10,553,275 | <categorify><EOS> | %%writefile submission.py
# stdlib, numpy/scipy and Halite SDK imports used throughout this standalone agent
import math, time, copy, random, pprint
import numpy as np
import scipy.optimize
import scipy.ndimage
from kaggle_environments.envs.halite.helpers import *

CONFIG_MAX_SHIPS=20
all_actions=[ShipAction.NORTH, ShipAction.EAST,ShipAction.SOUTH,ShipAction.WEST]
all_dirs=[Point(0,1), Point(1,0), Point(0,-1), Point(-1,0)]
start=None
num_shipyard_targets=4
size=None
ship_target={}
me=None
did_init=False
quiet=False
C=None
class Obj:
pass
turn=Obj()
turns_optimal=np.array(
[[0, 2, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 8],
[0, 1, 2, 3, 3, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7],
[0, 0, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7],
[0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6],
[0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6],
[0, 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
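# turns_optimal[ch, rt] is a precomputed lookup (commentary): ch buckets the
# log-ratio of carried halite to cell halite (see num_turns_to_mine) and rt is
# the round-trip travel time to a drop-off; the entry is the number of mining
# turns that maximizes average halite gained per turn.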
def print_enemy_ships(board):
    print('\nEnemy Ships')
for ship in board.ships.values() :
if ship.player_id != me.id:
print('{:6} {} halite {}'.format(ship.id,ship.position,ship.halite))
def print_actions(board):
    print('\nShip Actions')
for ship in me.ships:
print('{:6} {} {} halite {}'.format(ship.id,ship.position,ship.next_action,ship.halite))
print('Shipyard Actions')
for sy in me.shipyards:
print('{:6} {} {}'.format(sy.id,sy.position,sy.next_action))
def print_none(*args):
pass
def compute_max_ships(step):
if step < 200:
return CONFIG_MAX_SHIPS
elif step < 300:
return CONFIG_MAX_SHIPS-2
elif step < 350:
return CONFIG_MAX_SHIPS-4
else:
return CONFIG_MAX_SHIPS-5
def set_turn_data(board):
turn.num_ships=len(me.ships)
turn.max_ships=compute_max_ships(board.step)
turn.total_halite=me.halite
turn.halite_matrix=np.reshape(board.observation['halite'],(board.configuration.size,board.configuration.size))
turn.num_shipyards=len(me.shipyards)
turn.EP,turn.EH,turn.ES=gen_enemy_halite_matrix(board)
turn.taken={}
turn.last_episode =(board.step ==(board.configuration.episode_steps-2))
def init(obs,config):
global size
global print
if hasattr(config,'myval')and config.myval==9 and not quiet:
pass
else:
print=print_none
pprint.pprint=print_none
size = config.size
def limit(x,a,b):
if x<a:
return a
if x>b:
return b
return x
def num_turns_to_mine(C,H,rt_travel):
if C==0:
ch=0
elif H==0:
ch=turns_optimal.shape[0]
else:
ch=int(math.log(C/H)*2.5+5.5)
ch=limit(ch,0,turns_optimal.shape[0]-1)
rt_travel=int(limit(rt_travel,0,turns_optimal.shape[1]-1))
return turns_optimal[ch,rt_travel]
def halite_per_turn(carrying, halite,travel,min_mine=1):
turns=num_turns_to_mine(carrying,halite,travel)
if turns<min_mine:
turns=min_mine
mined=carrying+(1-.75**turns)*halite
return mined/(travel+turns), turns
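# Illustrative arithmetic for halite_per_turn (commentary): mining collects 25%
# of a cell per turn, so t turns yield (1 - 0.75**t) of its halite. With
# carrying=0, halite=500, travel=4, t=3: (1 - 0.421875)*500 ~ 289 mined, a rate
# of 289/(4+3) ~ 41 halite per turn; the function returns (rate, turns).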
def move(pos, action):
ret=None
if action==ShipAction.NORTH:
ret=pos+Point(0,1)
if action==ShipAction.SOUTH:
ret=pos+Point(0,-1)
if action==ShipAction.EAST:
ret=pos+Point(1,0)
if action==ShipAction.WEST:
ret=pos+Point(-1,0)
if ret is None:
ret=pos
return ret % size
def dirs_to(p1, p2, size=21):
deltaX, deltaY=p2 - p1
if abs(deltaX)>size/2:
if deltaX<0:
deltaX+=size
elif deltaX>0:
deltaX-=size
if abs(deltaY)>size/2:
if deltaY<0:
deltaY+=size
elif deltaY>0:
deltaY-=size
ret=[]
if deltaX>0:
ret.append(ShipAction.EAST)
if deltaX<0:
ret.append(ShipAction.WEST)
if deltaY>0:
ret.append(ShipAction.NORTH)
if deltaY<0:
ret.append(ShipAction.SOUTH)
if len(ret)==0:
ret=[None]
return ret,(deltaX,deltaY)
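# dirs_to handles the toroidal board (commentary): any delta larger than size/2
# is shifted by +/-size so ships take the shorter wrap-around path. Example:
# from x=20 to x=0 on a 21-wide board, deltaX = -20 becomes +1, i.e. step EAST.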
def shipyard_actions() :
for sy in me.shipyards:
if turn.num_ships < turn.max_ships:
if turn.total_halite >= 500 and sy.position not in turn.taken:
sy.next_action = ShipyardAction.SPAWN
turn.taken[sy.position]=1
turn.num_ships+=1
turn.total_halite-=500
def gen_enemy_halite_matrix(board):
EP=np.zeros(( size,size))
EH=np.zeros(( size,size))
ES=np.zeros(( size,size))
for id,ship in board.ships.items() :
if ship.player_id != me.id:
EH[ship.position.y,ship.position.x]=ship.halite
EP[ship.position.y,ship.position.x]=1
for id, sy in board.shipyards.items() :
if sy.player_id != me.id:
ES[sy.position.y,sy.position.x]=1
return EP,EH,ES
def dist(a,b):
action,step=dirs_to(a, b, size=21)
return abs(step[0])+ abs(step[1])
def nearest_shipyard(pos):
mn=100
best_pos=None
for sy in me.shipyards:
d=dist(pos, sy.position)
if d<mn:
mn=d
best_pos=sy.position
return mn,best_pos
def assign_targets(board,ships):
old_target=copy.copy(ship_target)
ship_target.clear()
if len(ships)==0:
return
halite_min=50
pts1=[]
pts2=[]
for pt,c in board.cells.items() :
assert isinstance(pt,Point)
if c.halite > halite_min:
pts1.append(pt)
for sy in me.shipyards:
for i in range(num_shipyard_targets):
pts2.append(sy.position)
C=np.zeros(( len(ships),len(pts1)+len(pts2)))
for i,ship in enumerate(ships):
for j,pt in enumerate(pts1+pts2):
d1=dist(ship.position,pt)
d2,shipyard_position=nearest_shipyard(pt)
if shipyard_position is None:
d2=1
my_halite=ship.halite
if j < len(pts1):
v, mining=halite_per_turn(my_halite,board.cells[pt].halite, d1+d2)
else:
if d1>0:
v=my_halite/d1
else:
v=0
if board.cells[pt].ship and board.cells[pt].ship.player_id != me.id:
enemy_halite=board.cells[pt].ship.halite
if enemy_halite <= my_halite:
v = -1000
else:
if d1<5:
v+= enemy_halite/(d1+1)
C[i,j]=v
print('C is {}'.format(C.shape))
row,col=scipy.optimize.linear_sum_assignment(C, maximize=True)
pts=pts1+pts2
for r,c in zip(row,col):
ship_target[ships[r].id]=pts[c]
    print('\nShip Targets')
print('Ship position target')
for id,t in ship_target.items() :
st=''
ta=''
if board.ships[id].position==t:
st='MINE'
elif len(me.shipyards)>0 and t==me.shipyards[0].position:
st='SHIPYARD'
if id not in old_target or old_target[id] != ship_target[id]:
ta=' NEWTARGET'
print('{0:6} at({1[0]:2},{1[1]:2})assigned({2[0]:2},{2[1]:2})h {3:3} {4:10} {5:10}'.format(
id, board.ships[id].position, t, board.cells[t].halite,st, ta))
return
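# Targeting is solved as a rectangular assignment problem (commentary): rows
# are ships, columns are mineable cells plus num_shipyard_targets repeated
# shipyard slots, entries are estimated halite-per-turn values, and
# scipy.optimize.linear_sum_assignment (the Hungarian method) picks the
# value-maximizing one-to-one matching.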
def make_avoidance_matrix(myship_halite):
filter=np.array([[0,1,0],[1,1,1],[0,1,0]])
bad_ship=np.logical_and(turn.EH <= myship_halite,turn.EP)
avoid=scipy.ndimage.convolve(bad_ship, filter, mode='wrap',cval=0.0)
avoid=np.logical_or(avoid,turn.ES)
return avoid
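# The plus-shaped filter dilates the dangerous-enemy mask one step in each
# cardinal direction (commentary): an enemy carrying no more halite than we do
# wins or trades a collision and can reach any adjacent cell next turn; enemy
# shipyards are OR-ed in as cells to avoid too.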
def make_attack_matrix(myship_halite):
attack=np.logical_and(turn.EH > myship_halite,turn.EP)
return attack
def get_max_halite_ship(board, avoid_danger=True):
mx=-1
the_ship=None
for ship in me.ships:
x=ship.position.x
y=ship.position.y
avoid=make_avoidance_matrix(ship.halite)
if ship.halite>mx and(not avoid_danger or not avoid[y,x]):
mx=ship.halite
the_ship=ship
return the_ship
def remove_dups(p):
ret=[]
for x in p:
if x not in ret:
ret.append(x)
return ret
def matrix_lookup(matrix,pos):
return matrix[pos.y,pos.x]
def ship_converts(board):
if turn.num_shipyards==0 and not turn.last_episode:
mx=get_max_halite_ship(board)
if mx is not None:
if mx.halite + turn.total_halite > 500:
mx.next_action=ShipAction.CONVERT
turn.taken[mx.position]=1
turn.num_shipyards+=1
turn.total_halite-=500
for ship in me.ships:
if ship.next_action:
continue
avoid=make_avoidance_matrix(ship.halite)
z=[matrix_lookup(avoid,move(ship.position,a)) for a in all_actions]
if np.all(z)and ship.halite > 500:
ship.next_action=ShipAction.CONVERT
turn.taken[ship.position]=1
turn.num_shipyards+=1
turn.total_halite-=500
print('ship id {} no escape converting'.format(ship.id))
if turn.last_episode and ship.halite > 500:
ship.next_action=ShipAction.CONVERT
turn.taken[ship.position]=1
turn.num_shipyards+=1
turn.total_halite-=500
def ship_moves(board):
ships=[ship for ship in me.ships if ship.next_action is None]
assign_targets(board,ships)
actions={}
for ship in ships:
if ship.id in ship_target:
a,delta = dirs_to(ship.position, ship_target[ship.id],size=size)
actions[ship.id]=a
else:
actions[ship.id]=[random.choice(all_actions)]
for ship in ships:
action=None
x=ship.position
avoid=make_avoidance_matrix(ship.halite)
attack=make_attack_matrix(ship.halite)
action_list=actions[ship.id]+[None]+all_actions
for a in all_actions:
m=move(x,a)
if attack[m.y,m.x]:
print('ship id {} attacking {}'.format(ship.id,a))
action_list.insert(0,a)
break
action_list=remove_dups(action_list)
for a in action_list:
m=move(x,a)
if avoid[m.y,m.x]:
print('ship id {} avoiding {}'.format(ship.id,a))
if m not in turn.taken and not avoid[m.y,m.x]:
action=a
break
        ship.next_action=action
        # move(x, None) returns x, so a ship that stays put reserves its own cell
        turn.taken[move(x, action)]=1
def agent(obs, config):
global size
global start
global prev_board
global me
global did_init
start_step=time.time()
if start is None:
start=time.time()
if not did_init:
init(obs,config)
did_init=True
board = Board(obs, config)
me=board.current_player
set_turn_data(board)
print('==== step {} sim {}'.format(board.step,board.step+1))
print('ships {} shipyards {}'.format(turn.num_ships,turn.num_shipyards))
print_enemy_ships(board)
ship_converts(board)
ship_moves(board)
shipyard_actions()
print_actions(board)
print('time this turn: {:8.3f} total elapsed {:8.3f}'.format(time.time() -start_step,time.time() -start))
return me.next_actions
| Halite by Two Sigma |
363,803 | <SOS> metric: MulticlassLoss Kaggle data source: personalized-medicine:-redefining-cancer-treatment<set_options> | from sklearn import *
import sklearn
import pandas as pd
import numpy as np
import lightgbm as lgb | Personalized Medicine: Redefining Cancer Treatment |
363,803 | %matplotlib inline
set_matplotlib_formats('svg')
sns.set_style("whitegrid" )<load_from_disk> | train = pd.read_csv('.. /input/training_variants')
test = pd.read_csv('.. /input/stage2_test_variants.csv')
trainx = pd.read_csv('.. /input/training_text', sep="\|\|", engine='python', header=None, skiprows=1, names=["ID","Text"])
testx = pd.read_csv('.. /input/stage2_test_text.csv', sep="\|\|", engine='python', header=None, skiprows=1, names=["ID","Text"] ) | Personalized Medicine: Redefining Cancer Treatment |
363,803 | train_data = pd.read_json('.. /input/train.json.zip', compression='zip')
train_data.head()<count_missing_values> | train = pd.merge(train, trainx, how='left', on='ID' ).fillna('')
train = train.drop(["ID"], axis=1)
test = pd.merge(test, testx, how='left', on='ID' ).fillna('')
pid = test['ID'].values | Personalized Medicine: Redefining Cancer Treatment |
363,803 | train_data.isnull().sum()<feature_engineering> | df_variants_test = pd.read_csv('.. /input/test_variants', usecols=['ID', 'Gene', 'Variation'])
df_text_test = pd.read_csv('.. /input/test_text', sep='\|\|', engine='python',
skiprows=1, names=['ID', 'Text'])
df_variants_test['Text'] = df_text_test['Text']
df_test = df_variants_test
df_labels_test = pd.read_csv('.. /input/stage1_solution_filtered.csv')
df_labels_test['Class'] = pd.to_numeric(df_labels_test.drop('ID', axis=1 ).idxmax(axis=1 ).str[5:])
df_test = df_test.merge(df_labels_test[['ID', 'Class']], on='ID', how='left' ).drop('ID', axis=1)
df_test = df_test[df_test['Class'].notnull() ]
df_stage_2_train = pd.concat([train, df_test])
df_stage_2_train.reset_index(drop=True, inplace=True)
train = df_stage_2_train
df_stage_2_train.info() | Personalized Medicine: Redefining Cancer Treatment |
363,803 | train_data['interest'] = np.where(train_data.interest_level=='low', 0,
np.where(train_data.interest_level=='medium', 1, 2))<count_values> | y = train['Class'].values
train = train.drop(['Class'], axis=1)
train.info() | Personalized Medicine: Redefining Cancer Treatment |
363,803 | train_data.building_id.value_counts().nlargest(10 )<data_type_conversions> | df_all = pd.concat(( train, test), axis=0, ignore_index=True)
df_all['Gene_Share'] = df_all.apply(lambda r: sum([1 for w in r['Gene'].split(' ')if w in r['Text'].split(' ')]), axis=1)
df_all['Variation_Share'] = df_all.apply(lambda r: sum([1 for w in r['Variation'].split(' ')if w in r['Text'].split(' ')]), axis=1 ) | Personalized Medicine: Redefining Cancer Treatment |
363,803 | train_data.created = pd.to_datetime(train_data.created, format='%Y-%m-%d %H:%M:%S' )<feature_engineering> | for i in range(56):
df_all['Gene_'+str(i)] = df_all['Gene'].map(lambda x: str(x[i])if len(x)>i else '')
df_all['Variation'+str(i)] = df_all['Variation'].map(lambda x: str(x[i])if len(x)>i else '' ) | Personalized Medicine: Redefining Cancer Treatment |
363,803 | train_data['month'] = train_data.created.dt.month
train_data['day_of_week'] = train_data.created.dt.weekday
train_data['hour'] = train_data.created.dt.hour<count_values> | gen_var_lst = sorted(list(train.Gene.unique())+ list(train.Variation.unique()))
print(len(gen_var_lst))
gen_var_lst = [x for x in gen_var_lst if len(x.split(' ')) ==1]
print(len(gen_var_lst))
i_ = 0 | Personalized Medicine: Redefining Cancer Treatment |
363,803 | print('Number of Unique Display Addresses is {}'.format(train_data.display_address.value_counts().shape[0]))<count_values> | for gen_var_lst_itm in gen_var_lst:
if i_ % 100 == 0: print(i_)
df_all['GV_'+str(gen_var_lst_itm)] = df_all['Text'].map(lambda x: str(x ).count(str(gen_var_lst_itm)))
i_ += 1 | Personalized Medicine: Redefining Cancer Treatment |
363,803 | train_data.display_address.value_counts().nlargest(15 )<count_duplicates> | for c in df_all.columns:
if df_all[c].dtype == 'object':
if c in ['Gene','Variation']:
lbl = preprocessing.LabelEncoder()
df_all[c+'_lbl_enc'] = lbl.fit_transform(df_all[c].values)
df_all[c+'_len'] = df_all[c].map(lambda x: len(str(x)))
df_all[c+'_words'] = df_all[c].map(lambda x: len(str(x ).split(' ')))
elif c != 'Text':
lbl = preprocessing.LabelEncoder()
df_all[c] = lbl.fit_transform(df_all[c].values)
if c=='Text':
df_all[c+'_len'] = df_all[c].map(lambda x: len(str(x)))
df_all[c+'_words'] = df_all[c].map(lambda x: len(str(x ).split(' ')) ) | Personalized Medicine: Redefining Cancer Treatment |
363,803 | train_data.latitude.nlargest(20 )<count_duplicates> | train = df_all.iloc[:len(train)]
test = df_all.iloc[len(train):]
class cust_regression_vals(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
def fit(self, x, y=None):
return self
def transform(self, x):
x = x.drop(['Gene', 'Variation','ID','Text'],axis=1 ).values
return x
class cust_txt_col(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, x):
return x[self.key].apply(str ) | Personalized Medicine: Redefining Cancer Treatment |
363,803 | train_data.longitude.nlargest(20 )<feature_engineering> | print('Pipeline...')
fp = pipeline.Pipeline([
('union', pipeline.FeatureUnion(
n_jobs = -1,
transformer_list = [
('standard', cust_regression_vals()),
('pi1', pipeline.Pipeline([('Gene', cust_txt_col('Gene')) ,('count_Gene', feature_extraction.text.CountVectorizer(analyzer=u'char', ngram_range=(1, 10))),('tsvd1', decomposition.TruncatedSVD(n_components=20, n_iter=25, random_state=12)) ])) ,
('pi2', pipeline.Pipeline([('Variation', cust_txt_col('Variation')) ,('count_Variation', feature_extraction.text.CountVectorizer(analyzer=u'char', ngram_range=(1, 10))),('tsvd2', decomposition.TruncatedSVD(n_components=20, n_iter=25, random_state=12)) ])) ,
('pi3', pipeline.Pipeline([('Text', cust_txt_col('Text')) ,('tfidf_Text', feature_extraction.text.TfidfVectorizer(ngram_range=(1, 2))),('tsvd3', decomposition.TruncatedSVD(n_components=50, n_iter=25, random_state=12)) ]))
])
)])
train = fp.fit_transform(train); print(train.shape)
test = fp.transform(test); print(test.shape)
y = y - 1 | Personalized Medicine: Redefining Cancer Treatment |
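A note on the feature pipeline above (commentary): character n-grams up to length 10 on the short Gene/Variation strings give very wide sparse matrices, which TruncatedSVD compresses into dense 20-dimensional embeddings. A minimal standalone sketch of the same pattern, on hypothetical toy data:

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import TruncatedSVD
genes = ['BRCA1', 'BRCA2', 'TP53', 'EGFR']                       # toy examples
X = CountVectorizer(analyzer='char', ngram_range=(1, 3)).fit_transform(genes)
X_dense = TruncatedSVD(n_components=2, random_state=12).fit_transform(X)
print(X_dense.shape)                                             # (4, 2)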
363,803 | train_data['photos_number'] = train_data.photos.str.len()<feature_engineering> | denom = 0
for i in range(2):
params = {
'learning_rate': 0.01,
'max_depth': 6,
'num_leaves': 50,
'objective': 'multiclass',
'metric': 'multi_logloss',
'num_class': 9
}
x1, x2, y1, y2 = model_selection.train_test_split(train, y, test_size=0.15, random_state=i)
train_set = lgb.Dataset(x1, y1)
valid_set = lgb.Dataset(x2, y2)
model = lgb.train(params, train_set, 1000, [valid_set], ["val"], verbose_eval=50, early_stopping_rounds=50)
score1 = metrics.log_loss(y2, model.predict(x2), labels = list(range(9)))
print(score1)
if denom != 0:
pred = model.predict(test)
preds += pred
else:
pred = model.predict(test)
preds = pred.copy()
denom += 1
submission = pd.DataFrame(pred, columns=['class'+str(c+1)for c in range(9)])
submission['ID'] = pid
submission.to_csv('submission_xgb_fold_' + str(i)+ '.csv', index=False ) | Personalized Medicine: Redefining Cancer Treatment |
363,803 | <train_model><EOS> | preds /= denom
submission = pd.DataFrame(preds, columns=['class'+str(c+1)for c in range(9)])
submission['ID'] = pid
submission.to_csv('submission_xgb.csv', index=False)
submission.head() | Personalized Medicine: Redefining Cancer Treatment |
540,156 | <categorify><EOS> | def xgboost_prediction(train,labels,test):
params = {}
params["objective"] = "reg:linear"
params["eta"] = 0.01
params["min_child_weight"] = 100
params["subsample"] = 0.6
params["colsample_bytree"] = 0.7
params["scale_pos_weight"] = 1.0
params["silent"] = 1
params["max_depth"] = 9
paramslist = list(params.items())
offset = 4000
num_rounds = 10000
xgb_test = xgb.DMatrix(test)
xgb_train = xgb.DMatrix(train[offset:,:], label=labels[offset:])
xgb_value = xgb.DMatrix(train[:offset,:], label=labels[:offset])
listforprint = [(xgb_train, 'train'),(xgb_value, 'value')]
model = xgb.train(paramslist, xgb_train, num_rounds, listforprint, early_stopping_rounds=120)
prediction1 = model.predict(xgb_test,ntree_limit=model.best_iteration)
train = train[::-1,:]
labels = np.log(labels[::-1])
xgb_train = xgb.DMatrix(train[offset:,:], label=labels[offset:])
xgb_value = xgb.DMatrix(train[:offset,:], label=labels[:offset])
listforprint = [(xgb_train, 'train'),(xgb_value, 'value')]
model = xgb.train(paramslist, xgb_train, num_rounds, listforprint, early_stopping_rounds=120)
prediction2 = model.predict(xgb_test,ntree_limit=model.best_iteration)
prediction =(prediction1)*1.4 +(prediction2)*8.6
return prediction
train = pd.read_csv('.. /input/train.csv', index_col=0)
test = pd.read_csv('.. /input/test.csv', index_col=0)
labels = train.Hazard
train.drop('Hazard', axis=1, inplace=True)
train_temp = train
test_temp = test
train_temp.drop('T2_V10', axis=1, inplace=True)
train_temp.drop('T2_V7', axis=1, inplace=True)
train_temp.drop('T1_V13', axis=1, inplace=True)
train_temp.drop('T1_V10', axis=1, inplace=True)
test_temp.drop('T2_V10', axis=1, inplace=True)
test_temp.drop('T2_V7', axis=1, inplace=True)
test_temp.drop('T1_V13', axis=1, inplace=True)
test_temp.drop('T1_V10', axis=1, inplace=True)
columns = train.columns
test_index = test.index
train_temp = np.array(train_temp)
test_temp = np.array(test_temp)
for i in range(train_temp.shape[1]):
le = preprocessing.LabelEncoder()
le.fit(list(train_temp[:,i])+ list(test_temp[:,i]))
train_temp[:,i] = le.transform(train_temp[:,i])
test_temp[:,i] = le.transform(test_temp[:,i])
train_temp = train_temp.astype(float)
test_temp = test_temp.astype(float)
prediction1 = xgboost_prediction(train_temp,labels,test_temp)
train = train.T.to_dict().values()
test = test.T.to_dict().values()
vec = DictVectorizer()
train = vec.fit_transform(train)
test = vec.transform(test)
prediction2 = xgboost_prediction(train,labels,test)
prediction = prediction1 + prediction2
prediction = pd.DataFrame({"Id": test_index, "Hazard": prediction})
prediction = prediction.set_index('Id')
prediction.to_csv('result.csv' ) | Liberty Mutual Group: Property Inspection Prediction |
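One observation on the blend above (commentary): prediction2 is on a log scale while prediction1 is raw, yet mixing them is commonly justified for this competition because normalized Gini is rank-based, and a monotone transform leaves a single score's ranking unchanged. A tiny illustration:

import numpy as np
from scipy.stats import rankdata
s = np.array([1.0, 3.0, 2.0])
print(rankdata(s), rankdata(np.log(s)))   # identical rankings: [1. 3. 2.]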
8,310,908 | <SOS> metric: logloss Kaggle data source: google-cloud-ncaa-march-madness-2020-division-1-womens-tournament<categorify> | finish_data = 2014
lgb_num_leaves_max = 200
lgb_in_leaf = 10
lgb_lr = 0.001
lgb_bagging = 7
xgb_max_depth = 7
xgb_min_child_weight = 75
xgb_lr = 0.0004
xgb_num_boost_round_max = 3000
w_lgb = 0.4
w_xgb = 0.5
w_logreg = 1 - w_lgb - w_xgb
w_logreg | Google Cloud & NCAA® ML Competition 2020-NCAAW |
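The three weights are meant to form a convex combination; a quick sanity check (illustrative):

assert abs(w_lgb + w_xgb + w_logreg - 1.0) < 1e-9   # 0.4 + 0.5 + 0.1 == 1.0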
8,310,908 | dataset_test=tf.data.Dataset.from_tensor_slices(( Xids_test,Xmask_test))
def map_func(input_ids,mask):
return {'input_ids':input_ids,'attention_mask':mask}
dataset_test=dataset_test.map(map_func)
dataset_test=dataset_test.batch(64 ).prefetch(1000 )<choose_model_class> | warnings.filterwarnings("ignore" ) | Google Cloud & NCAA® ML Competition 2020-NCAAW |
8,310,908 | distil_bert = 'distilbert-base-uncased'
config = DistilBertConfig(dropout=0.4, attention_dropout=0.4)
config.output_hidden_states = False
transformer_model = TFDistilBertModel.from_pretrained(distil_bert, config = config)
input_ids_in = tf.keras.layers.Input(shape=(SEQ_length,), name='input_ids', dtype='int32')
input_masks_in = tf.keras.layers.Input(shape=(SEQ_length,), name='attention_mask', dtype='int32')
embedding_layer = transformer_model(input_ids_in, attention_mask=input_masks_in)[0]
X = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(100, return_sequences=True, dropout=0.4, recurrent_dropout=0.4))(embedding_layer)
X = tf.keras.layers.GlobalMaxPool1D()(X)
X = tf.keras.layers.Dense(64, activation='relu' )(X)
X = tf.keras.layers.Dropout(0.3 )(X)
X = tf.keras.layers.Dense(32, activation='relu' )(X)
X = tf.keras.layers.Dropout(0.3 )(X)
X = tf.keras.layers.Dense(1, activation='sigmoid' )(X)
model = tf.keras.Model(inputs=[input_ids_in, input_masks_in], outputs = X)
for layer in model.layers[:3]:
layer.trainable = False
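# Note (commentary): model.layers[:3] is assumed to cover the two Input layers
# plus the DistilBERT block, so the transformer stays frozen and only the
# BiLSTM + dense head trains. Illustrative check:
#   for layer in model.layers[:4]:
#       print(layer.name, layer.trainable)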
<choose_model_class> | tourney_result = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/WNCAATourneyCompactResults.csv')
if finish_data == 2014:
tourney_result = tourney_result[tourney_result['Season'] < 2015]
tourney_seed = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/WNCAATourneySeeds.csv')
if finish_data == 2014:
tourney_seed = tourney_seed[tourney_seed['Season'] < 2015]
tourney_result = tourney_result.drop(['DayNum', 'WScore', 'LScore', 'WLoc', 'NumOT'], axis=1)
tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
tourney_result.rename(columns={'Seed':'WSeed'}, inplace=True)
tourney_result = tourney_result.drop('TeamID', axis=1)
tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
tourney_result.rename(columns={'Seed':'LSeed'}, inplace=True)
tourney_result = tourney_result.drop('TeamID', axis=1 ) | Google Cloud & NCAA® ML Competition 2020-NCAAW |
8,310,908 | model.compile(loss=tf.keras.losses.BinaryCrossentropy(),
              optimizer='adam',
              metrics=[tf.keras.metrics.AUC(), tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])<train_model> | def get_seed(x):
return int(x[1:3])
tourney_result['WSeed'] = tourney_result['WSeed'].map(lambda x: get_seed(x))
tourney_result['LSeed'] = tourney_result['LSeed'].map(lambda x: get_seed(x)) | Google Cloud & NCAA® ML Competition 2020-NCAAW |
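get_seed strips the one-letter region prefix from seed strings such as 'W01' (a trailing play-in suffix like 'a' is ignored by the [1:3] slice); for example (illustrative):

print(get_seed('W01'), get_seed('X16a'))   # -> 1 16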
8,310,908 | history=model.fit(train,validation_data=val,epochs=10 )<save_to_csv> | season_result = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/WRegularSeasonCompactResults.csv')
if finish_data == 2014:
season_result = season_result[season_result['Season'] < 2015]
season_win_result = season_result[['Season', 'WTeamID', 'WScore']]
season_lose_result = season_result[['Season', 'LTeamID', 'LScore']]
season_win_result.rename(columns={'WTeamID':'TeamID', 'WScore':'Score'}, inplace=True)
season_lose_result.rename(columns={'LTeamID':'TeamID', 'LScore':'Score'}, inplace=True)
season_result = pd.concat(( season_win_result, season_lose_result)).reset_index(drop=True)
season_score = season_result.groupby(['Season', 'TeamID'])['Score'].sum().reset_index() | Google Cloud & NCAA® ML Competition 2020-NCAAW |
8,310,908 | predictions=model.predict(dataset_test)
df_test['label']=predictions
df_test.to_csv('submission.csv',columns=['urlid','label'],index=False )<categorify> | tourney_result = pd.merge(tourney_result, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
tourney_result.rename(columns={'Score':'WScoreT'}, inplace=True)
tourney_result = tourney_result.drop('TeamID', axis=1)
tourney_result = pd.merge(tourney_result, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
tourney_result.rename(columns={'Score':'LScoreT'}, inplace=True)
tourney_result = tourney_result.drop('TeamID', axis=1 ) | Google Cloud & NCAA® ML Competition 2020-NCAAW |
8,310,908 | input_x=tf.data.Dataset.from_tensor_slices(( Xids,Xmask,y))
def map_func(input_ids,mask,labels):
return {'input_ids':input_ids,'attention_mask':mask}
input_x=input_x.map(map_func)
input_x=input_x.batch(32 ).prefetch(1000)  # no shuffle here, so predictions stay aligned with y_true
y_true = y
y_true<predict_on_test> | tourney_win_result = tourney_result.drop(['Season', 'WTeamID', 'LTeamID'], axis=1)
tourney_win_result.rename(columns={'WSeed':'Seed1', 'LSeed':'Seed2', 'WScoreT':'ScoreT1', 'LScoreT':'ScoreT2'}, inplace=True ) | Google Cloud & NCAA® ML Competition 2020-NCAAW |
8,310,908 | y_pred=model.predict(dataset)
y_pred
y_pred = np.round(y_pred)
y_pred
print(metrics.classification_report(y_true, y_pred))<import_modules> | tourney_lose_result = tourney_win_result.copy()
tourney_lose_result['Seed1'] = tourney_win_result['Seed2']
tourney_lose_result['Seed2'] = tourney_win_result['Seed1']
tourney_lose_result['ScoreT1'] = tourney_win_result['ScoreT2']
tourney_lose_result['ScoreT2'] = tourney_win_result['ScoreT1'] | Google Cloud & NCAA® ML Competition 2020-NCAAW |
8,310,908 | import os
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import cross_validate, GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score<define_variables> | tourney_win_result['Seed_diff'] = tourney_win_result['Seed1'] - tourney_win_result['Seed2']
tourney_win_result['ScoreT_diff'] = tourney_win_result['ScoreT1'] - tourney_win_result['ScoreT2']
tourney_lose_result['Seed_diff'] = tourney_lose_result['Seed1'] - tourney_lose_result['Seed2']
tourney_lose_result['ScoreT_diff'] = tourney_lose_result['ScoreT1'] - tourney_lose_result['ScoreT2'] | Google Cloud & NCAA® ML Competition 2020-NCAAW |
8,310,908 | FILEDIR = Path('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament' )<load_from_csv> | tourney_win_result['result'] = 1
tourney_lose_result['result'] = 0
train_df = pd.concat(( tourney_win_result, tourney_lose_result)).reset_index(drop=True)
train_df | Google Cloud & NCAA® ML Competition 2020-NCAAW |
8,310,908 | sub = pd.read_csv(FILEDIR / 'MSampleSubmissionStage1_2020.csv', usecols=['ID'])
id_splited = sub['ID'].str.split('_', expand=True ).astype(int ).rename(columns={0: 'Season', 1: 'Team1', 2: 'Team2'})
sub = pd.concat([sub, id_splited], axis=1 ).set_index(['Season', 'Team1', 'Team2'] ).sort_index()<count_duplicates> | test_df = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv')
sub = test_df.copy() | Google Cloud & NCAA® ML Competition 2020-NCAAW |
8,310,908 | tourney_teams = {}
tourney_teams_all = set()
for season in sub.index.get_level_values('Season' ).drop_duplicates() :
tourney_teams[season] = set()
tourney_teams[season].update(sub.loc[season].index.get_level_values('Team1'))
tourney_teams[season].update(sub.loc[season].index.get_level_values('Team2'))
tourney_teams_all.update(tourney_teams[season])
{k: len(v)for k, v in tourney_teams.items() }<load_from_csv> | test_df['Season'] = test_df['ID'].map(lambda x: int(x[:4]))
test_df['WTeamID'] = test_df['ID'].map(lambda x: int(x[5:9]))
test_df['LTeamID'] = test_df['ID'].map(lambda x: int(x[10:14])) | Google Cloud & NCAA® ML Competition 2020-NCAAW |
8,310,908 | conferences = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MTeamConferences.csv')
conferences = pd.concat(
[conferences.query('Season == @season and TeamID in @teams')for season, teams in tourney_teams.items() ])
conferences = conferences.set_index(['Season', 'TeamID'] ).sort_index()<load_from_csv> | tourney_seed = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/WNCAATourneySeeds.csv')
if finish_data == 2014:
tourney_seed = tourney_seed[tourney_seed['Season'] > 2014] | Google Cloud & NCAA® ML Competition 2020-NCAAW |
8,310,908 | coaches = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MTeamCoaches.csv')
coaches = pd.concat(
[coaches.query('Season == @season and TeamID in @team')for season, team in tourney_teams.items() ])
coaches = coaches[coaches['LastDayNum'] == 154].set_index(['Season', 'TeamID'] ).sort_index() [['CoachName']]<load_from_csv> | season_result = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/WRegularSeasonCompactResults.csv')
if finish_data == 2014:
season_result = season_result[season_result['Season'] > 2014]
season_win_result = season_result[['Season', 'WTeamID', 'WScore']]
season_lose_result = season_result[['Season', 'LTeamID', 'LScore']]
season_win_result.rename(columns={'WTeamID':'TeamID', 'WScore':'Score'}, inplace=True)
season_lose_result.rename(columns={'LTeamID':'TeamID', 'LScore':'Score'}, inplace=True)
season_result = pd.concat(( season_win_result, season_lose_result)).reset_index(drop=True)
season_score = season_result.groupby(['Season', 'TeamID'])['Score'].sum().reset_index() | Google Cloud & NCAA® ML Competition 2020-NCAAW |
8,310,908 | teams = pd.read_csv(FILEDIR / 'MDataFiles_Stage1/MTeams.csv', usecols=['TeamID', 'FirstD1Season'])
teams['FirstD1Season'] = 2020 - teams['FirstD1Season']
teams = pd.concat(
[teams.query('TeamID in @team' ).assign(Season=season)for season, team in tourney_teams.items() ])
teams = teams.set_index(['Season', 'TeamID'] ).sort_index()<load_from_csv> | test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Seed':'Seed1'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Seed':'Seed2'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df = pd.merge(test_df, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Score':'ScoreT1'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df = pd.merge(test_df, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Score':'ScoreT2'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1 ) | Google Cloud & NCAA® ML Competition 2020-NCAAW |