kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completion
stringlengths
1
182k
comp_name
stringlengths
5
57
8,016,864
# Load the DontGetKicked training set and preview the first rows.  # <load_from_csv>
train_data = pd.read_csv('/kaggle/input/DontGetKicked/training.csv')
train_data.head()
class BaseModel(object):
    """Cross-validated training harness shared by the LGBM / CatBoost / NN wrappers.

    Subclasses implement train_model / get_params / convert_dataset (and may
    override convert_x).  Fitting runs eagerly in __init__; results land on
    self.y_pred, self.score, self.model, self.oof, self.y_val and self.fi_df.
    """

    def __init__(self, train_df, test_df, target, features, categoricals=None,
                 n_splits=3, cv_method="KFold", group=None, task="regression",
                 parameter_tuning=False, scaler=None, verbose=True):
        self.train_df = train_df
        self.test_df = test_df
        self.target = target
        self.features = features
        self.n_splits = n_splits
        # BUG FIX: the original default was a shared mutable list (categoricals=[])
        # and fit() mutates it with .remove(); copy so instances never alias.
        self.categoricals = list(categoricals) if categoricals is not None else []
        self.cv_method = cv_method
        self.group = group
        self.task = task
        self.parameter_tuning = parameter_tuning
        self.scaler = scaler
        self.verbose = verbose  # set before get_params: subclasses may read it
        self.cv = self.get_cv()
        self.params = self.get_params()
        self.y_pred, self.score, self.model, self.oof, self.y_val, self.fi_df = self.fit()

    def train_model(self, train_set, val_set):
        """Fit one fold; return (model, per-feature importance array)."""
        raise NotImplementedError

    def get_params(self):
        """Return the hyper-parameter dict for the concrete model."""
        raise NotImplementedError

    def convert_dataset(self, x_train, y_train, x_val, y_val):
        """Wrap raw frames into whatever the concrete model consumes."""
        raise NotImplementedError

    def convert_x(self, x):
        # Identity by default; subclasses may reshape inputs for predict().
        return x

    def calc_metric(self, y_true, y_pred):
        """Log-loss for classification, RMSE for regression."""
        if self.task == "classification":
            return log_loss(y_true, y_pred)
        elif self.task == "regression":
            return np.sqrt(mean_squared_error(y_true, y_pred))
        # Original silently returned None here, deferring the crash.
        raise ValueError("unknown task: {}".format(self.task))

    def get_cv(self):
        """Return a split iterator for the configured CV scheme."""
        if self.cv_method == "KFold":
            cv = KFold(n_splits=self.n_splits, shuffle=True, random_state=42)
            return cv.split(self.train_df)
        elif self.cv_method == "StratifiedKFold":
            cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=42)
            return cv.split(self.train_df, self.train_df[self.target])
        elif self.cv_method == "TimeSeriesSplit":
            cv = TimeSeriesSplit(max_train_size=None, n_splits=self.n_splits)
            return cv.split(self.train_df)
        elif self.cv_method == "GroupKFold":
            # BUG FIX: GroupKFold accepts neither shuffle nor random_state;
            # the original constructor call raised TypeError.
            cv = GroupKFold(n_splits=self.n_splits)
            return cv.split(self.train_df, self.train_df[self.target], self.group)
        elif self.cv_method == "StratifiedGroupKFold":
            cv = StratifiedGroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42)
            return cv.split(self.train_df, self.train_df[self.target], self.group)
        raise ValueError("unknown cv_method: {}".format(self.cv_method))

    def fit(self):
        """Run the CV loop; return (test preds, oof score, last model, oof preds,
        oof targets, feature-importance frame)."""
        oof_pred = np.zeros((self.train_df.shape[0],))
        y_vals = np.zeros((self.train_df.shape[0],))
        y_pred = np.zeros((self.test_df.shape[0],))

        # The grouping column is metadata, not a feature.
        if self.group is not None:
            if self.group in self.features:
                self.features.remove(self.group)
            if self.group in self.categoricals:
                self.categoricals.remove(self.group)
        fi = np.zeros((self.n_splits, len(self.features)))

        if self.scaler is not None:
            # Impute (median / mode), then fit the scaler on train+test jointly.
            numerical_features = [f for f in self.features if f not in self.categoricals]
            self.train_df[numerical_features] = self.train_df[numerical_features].fillna(self.train_df[numerical_features].median())
            self.test_df[numerical_features] = self.test_df[numerical_features].fillna(self.test_df[numerical_features].median())
            self.train_df[self.categoricals] = self.train_df[self.categoricals].fillna(self.train_df[self.categoricals].mode().iloc[0])
            self.test_df[self.categoricals] = self.test_df[self.categoricals].fillna(self.test_df[self.categoricals].mode().iloc[0])
            if self.scaler == "MinMax":
                scaler = MinMaxScaler()
            elif self.scaler == "Standard":
                scaler = StandardScaler()
            df = pd.concat([self.train_df[numerical_features], self.test_df[numerical_features]], ignore_index=True)
            scaler.fit(df[numerical_features])
            x_test = self.test_df.copy()
            x_test[numerical_features] = scaler.transform(x_test[numerical_features])
            # Keras-style input: one array per categorical, plus the numeric block.
            x_test = [np.absolute(x_test[i]) for i in self.categoricals] + [x_test[numerical_features]]
        else:
            x_test = self.test_df[self.features]

        # BUG FIX: convert x_test once, outside the loop.  The original rebound
        # x_test with self.convert_x inside every fold, re-applying the
        # conversion n_splits times when convert_x is not the identity.
        x_test = self.convert_x(x_test)

        for fold, (train_idx, val_idx) in enumerate(self.cv):
            x_train, x_val = self.train_df.loc[train_idx, self.features], self.train_df.loc[val_idx, self.features]
            y_train, y_val = self.train_df.loc[train_idx, self.target], self.train_df.loc[val_idx, self.target]
            if self.scaler is not None:
                # Copy to avoid chained-assignment writes into train_df slices.
                x_train = x_train.copy()
                x_val = x_val.copy()
                x_train[numerical_features] = scaler.transform(x_train[numerical_features])
                x_val[numerical_features] = scaler.transform(x_val[numerical_features])
                x_train = [np.absolute(x_train[i]) for i in self.categoricals] + [x_train[numerical_features]]
                x_val = [np.absolute(x_val[i]) for i in self.categoricals] + [x_val[numerical_features]]
            train_set, val_set = self.convert_dataset(x_train, y_train, x_val, y_val)
            model, importance = self.train_model(train_set, val_set)
            fi[fold, :] = importance
            conv_x_val = self.convert_x(x_val)
            y_vals[val_idx] = y_val
            oof_pred[val_idx] = model.predict(conv_x_val).reshape(oof_pred[val_idx].shape)
            # Average the test predictions across folds.
            y_pred += model.predict(x_test).reshape(y_pred.shape) / self.n_splits
            print('Partial score of fold {} is: {}'.format(fold, self.calc_metric(y_val, oof_pred[val_idx])))

        # Long-form importance frame plus a per-feature mean column.
        fi_df = pd.DataFrame()
        for n in np.arange(self.n_splits):
            tmp = pd.DataFrame()
            tmp["features"] = self.features
            tmp["importance"] = fi[n, :]
            tmp["fold"] = n
            fi_df = pd.concat([fi_df, tmp], ignore_index=True)
        gfi = fi_df[["features", "importance"]].groupby(["features"]).mean().reset_index()
        fi_df = fi_df.merge(gfi, on="features", how="left", suffixes=('', '_mean'))

        loss_score = self.calc_metric(self.train_df[self.target], oof_pred)
        if self.verbose:
            print('Our oof loss score is: ', loss_score)
        return y_pred, loss_score, model, oof_pred, y_vals, fi_df

    def plot_feature_importance(self, rank_range=[1, 50]):
        """Bar-plot mean importances for features ranked in rank_range; return the slice."""
        fig, ax = plt.subplots(1, 1, figsize=(10, 20))
        sorted_df = self.fi_df.sort_values(by="importance_mean", ascending=False).reset_index().iloc[self.n_splits * (rank_range[0] - 1): self.n_splits * rank_range[1]]
        sns.barplot(data=sorted_df, x="importance", y="features", orient='h')
        ax.set_xlabel("feature importance")
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        return sorted_df
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Load the DontGetKicked test set and preview it.  # <count_missing_values>
test_data = pd.read_csv('/kaggle/input/DontGetKicked/test.csv')
test_data.head()
class LgbModel(BaseModel):
    """LightGBM implementation of BaseModel, with optional Optuna tuning."""

    def train_model(self, train_set, val_set):
        # Log every 100 rounds when verbose, otherwise stay quiet.
        log_period = 100 if self.verbose else 0
        booster = lgb.train(self.params, train_set,
                            num_boost_round=5000,
                            valid_sets=[train_set, val_set],
                            verbose_eval=log_period)
        gain = booster.feature_importance(importance_type="gain")
        return booster, gain

    def convert_dataset(self, x_train, y_train, x_val, y_val):
        # lgb.Dataset handles categorical columns natively.
        train_set = lgb.Dataset(x_train, y_train, categorical_feature=self.categoricals)
        val_set = lgb.Dataset(x_val, y_val, categorical_feature=self.categoricals)
        return train_set, val_set

    def get_params(self):
        """Fixed defaults, or Optuna-tuned parameters when parameter_tuning is set."""
        params = {
            'num_leaves': 127,
            'min_data_in_leaf': 50,
            'max_depth': -1,
            'learning_rate': 0.005,
            "boosting_type": "gbdt",
            "bagging_seed": 11,
            "verbosity": -1,
            'random_state': 42,
        }
        if self.task == "regression":
            params["objective"] = "regression"
            params["metric"] = "rmse"
        elif self.task == "classification":
            params["objective"] = "binary"
            params["metric"] = "binary_logloss"

        if self.parameter_tuning == True:
            def objective(trial):
                # Single 70/30 holdout for a fast search.
                train_x, test_x, train_y, test_y = train_test_split(
                    self.train_df[self.features], self.train_df[self.target],
                    test_size=0.3, random_state=42)
                dtrain = lgb.Dataset(train_x, train_y, categorical_feature=self.categoricals)
                dtest = lgb.Dataset(test_x, test_y, categorical_feature=self.categoricals)
                hyperparams = {
                    'num_leaves': trial.suggest_int('num_leaves', 24, 1024),
                    'boosting_type': 'gbdt',
                    'objective': params["objective"],
                    'metric': params["metric"],
                    'max_depth': trial.suggest_int('max_depth', 4, 16),
                    'min_child_weight': trial.suggest_int('min_child_weight', 1, 20),
                    'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0),
                    'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0),
                    'bagging_freq': trial.suggest_int('bagging_freq', 1, 7),
                    'min_child_samples': trial.suggest_int('min_child_samples', 5, 100),
                    'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0),
                    'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0),
                    'early_stopping_rounds': 100,
                }
                booster = lgb.train(hyperparams, dtrain, valid_sets=dtest, verbose_eval=500)
                pred = booster.predict(test_x)
                if self.task == "classification":
                    return log_loss(test_y, pred)
                elif self.task == "regression":
                    return np.sqrt(mean_squared_error(test_y, pred))

            study = optuna.create_study(direction='minimize')
            study.optimize(objective, n_trials=50)
            print('Number of finished trials: {}'.format(len(study.trials)))
            print('Best trial:')
            trial = study.best_trial
            print(' Value: {}'.format(trial.value))
            print(' Params: ')
            for key, value in trial.params.items():
                print(' {}: {}'.format(key, value))
            # Use the winning parameters with a low fixed learning rate.
            params = trial.params
            params["learning_rate"] = 0.001
            plot_optimization_history(study)
        return params
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Per-column missing-value counts for the training set.  # <count_values>
train_data.isnull().sum()
class CatbModel(BaseModel):
    """CatBoost implementation of BaseModel."""

    def train_model(self, train_set, val_set):
        # Print progress every 100 iterations when verbose.
        log_period = 100 if self.verbose else 0
        if self.task == "regression":
            estimator = CatBoostRegressor(**self.params)
        elif self.task == "classification":
            estimator = CatBoostClassifier(**self.params)
        estimator.fit(train_set['X'], train_set['y'],
                      eval_set=(val_set['X'], val_set['y']),
                      verbose=log_period,
                      cat_features=self.categoricals)
        return estimator, estimator.get_feature_importance()

    def convert_dataset(self, x_train, y_train, x_val, y_val):
        # CatBoost consumes raw frames; just bundle them in dicts.
        return {'X': x_train, 'y': y_train}, {'X': x_val, 'y': y_val}

    def get_params(self):
        params = {
            'task_type': "CPU",
            'learning_rate': 0.01,
            'iterations': 1000,
            'random_seed': 42,
            'use_best_model': True,
        }
        if self.task == "regression":
            params["loss_function"] = "RMSE"
        elif self.task == "classification":
            params["loss_function"] = "Logloss"
        return params
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Class balance of the IsBadBuy target.  # <count_values>
train_data['IsBadBuy'].value_counts()
class Mish(Layer):
    """Mish activation layer: x * tanh(softplus(x))."""

    def __init__(self, **kwargs):
        super(Mish, self).__init__(**kwargs)

    def build(self, input_shape):
        super(Mish, self).build(input_shape)

    def call(self, x):
        return x * K.tanh(K.softplus(x))

    def compute_output_shape(self, input_shape):
        # Element-wise activation: shape is unchanged.
        return input_shape


class LayerNormalization(keras.layers.Layer):
    """Layer normalisation over the last axis with optional learnable gain/bias."""

    def __init__(self, center=True, scale=True, epsilon=None,
                 gamma_initializer='ones', beta_initializer='zeros',
                 gamma_regularizer=None, beta_regularizer=None,
                 gamma_constraint=None, beta_constraint=None, **kwargs):
        super(LayerNormalization, self).__init__(**kwargs)
        self.supports_masking = True
        self.center = center
        self.scale = scale
        if epsilon is None:
            # Default far below K.epsilon() so the variance is barely perturbed.
            epsilon = K.epsilon() * K.epsilon()
        self.epsilon = epsilon
        self.gamma_initializer = keras.initializers.get(gamma_initializer)
        self.beta_initializer = keras.initializers.get(beta_initializer)
        self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
        self.beta_regularizer = keras.regularizers.get(beta_regularizer)
        self.gamma_constraint = keras.constraints.get(gamma_constraint)
        self.beta_constraint = keras.constraints.get(beta_constraint)
        self.gamma, self.beta = None, None

    def get_config(self):
        config = {
            'center': self.center,
            'scale': self.scale,
            'epsilon': self.epsilon,
            'gamma_initializer': keras.initializers.serialize(self.gamma_initializer),
            'beta_initializer': keras.initializers.serialize(self.beta_initializer),
            'gamma_regularizer': keras.regularizers.serialize(self.gamma_regularizer),
            'beta_regularizer': keras.regularizers.serialize(self.beta_regularizer),
            'gamma_constraint': keras.constraints.serialize(self.gamma_constraint),
            'beta_constraint': keras.constraints.serialize(self.beta_constraint),
        }
        base_config = super(LayerNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape

    def compute_mask(self, inputs, input_mask=None):
        return input_mask

    def build(self, input_shape):
        # One gamma/beta weight per feature (last axis).
        shape = input_shape[-1:]
        if self.scale:
            self.gamma = self.add_weight(shape=shape,
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint,
                                         name='gamma')
        if self.center:
            self.beta = self.add_weight(shape=shape,
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint,
                                        name='beta')
        super(LayerNormalization, self).build(input_shape)

    def call(self, inputs, training=None):
        # Standardise across the feature axis, then re-scale / re-centre.
        mean = K.mean(inputs, axis=-1, keepdims=True)
        variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True)
        std = K.sqrt(variance + self.epsilon)
        outputs = (inputs - mean) / std
        if self.scale:
            outputs *= self.gamma
        if self.center:
            outputs += self.beta
        return outputs
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Cardinality check on the raw 'Model' column.  # <count_values>
train_data['Model'].value_counts()
class NeuralNetworkModel(BaseModel):
    """Keras entity-embedding MLP implementation of BaseModel."""

    def train_model(self, train_set, val_set):
        inputs = []
        embeddings = []
        embedding_out_dim = self.params['embedding_out_dim']
        n_neuron = self.params['hidden_units']
        # One embedding input per categorical column.
        for col in self.categoricals:
            cat_input = Input(shape=(1,))
            vocab_size = int(np.absolute(self.train_df[col]).max() + 1)
            emb = Embedding(vocab_size, embedding_out_dim, input_length=1)(cat_input)
            emb = Reshape(target_shape=(embedding_out_dim,))(emb)
            inputs.append(cat_input)
            embeddings.append(emb)
        # Single dense input covering all numeric columns.
        input_numeric = Input(shape=(len(self.features) - len(self.categoricals),))
        embedding_numeric = Dense(n_neuron)(input_numeric)
        embedding_numeric = Mish()(embedding_numeric)
        inputs.append(input_numeric)
        embeddings.append(embedding_numeric)

        x = Concatenate()(embeddings)
        # Funnel-shaped hidden stack: Dense -> Mish -> Dropout -> LayerNorm.
        for depth in np.arange(self.params['hidden_layers'] - 1):
            x = Dense(n_neuron // (2 * (depth + 1)))(x)
            x = Mish()(x)
            x = Dropout(self.params['hidden_dropout'])(x)
            x = LayerNormalization()(x)
        if self.task == "regression":
            out = Dense(1, activation="linear", name="out")(x)
            loss = "mse"
        elif self.task == "classification":
            out = Dense(1, activation='sigmoid', name='out')(x)
            loss = "binary_crossentropy"
        model = Model(inputs=inputs, outputs=out)
        model.compile(loss=loss, optimizer=Adam(lr=1e-04, beta_1=0.9, beta_2=0.999, decay=1e-04))
        er = EarlyStopping(patience=10, min_delta=1e-4, restore_best_weights=True, monitor='val_loss')
        ReduceLR = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min')
        model.fit(train_set['X'], train_set['y'],
                  callbacks=[er, ReduceLR],
                  epochs=self.params['epochs'],
                  batch_size=self.params['batch_size'],
                  validation_data=[val_set['X'], val_set['y']])
        # Keras gives no per-feature importances; return zeros as a placeholder.
        fi = np.zeros(len(self.features))
        return model, fi

    def convert_dataset(self, x_train, y_train, x_val, y_val):
        return {'X': x_train, 'y': y_train}, {'X': x_val, 'y': y_val}

    def get_params(self):
        # NOTE(review): several keys (input_dropout, hidden_activation,
        # batch_norm, optimizer) are carried but unused by train_model above.
        params = {
            'input_dropout': 0.0,
            'hidden_layers': 2,
            'hidden_units': 128,
            'embedding_out_dim': 4,
            'hidden_activation': 'relu',
            'hidden_dropout': 0.05,
            'batch_norm': 'before_act',
            'optimizer': {'type': 'adam', 'lr': 0.001},
            'batch_size': 128,
            'epochs': 80,
        }
        return params
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# 'Model' is too high-cardinality to encode; drop it from both frames.  # <drop_column>
train_data.drop('Model', axis=1, inplace=True)
test_data.drop("Model", axis=1, inplace=True)
train_data['Trim'].value_counts()
# Read every stage-1 data file into a dict keyed by file stem.
# WTeamSpellings is not UTF-8 and needs a cp1252 decode.
data_dict = {}
for csv_path in glob.glob('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/*'):
    stem = csv_path.split('/')[-1].split('.')[0]
    if stem != 'WTeamSpellings':
        data_dict[stem] = pd.read_csv(csv_path)
    else:
        data_dict[stem] = pd.read_csv(csv_path, encoding='cp1252')
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Drop the sparse 'Trim' column from both frames.  # <count_values>
train_data.drop('Trim', inplace=True, axis=1)
test_data.drop('Trim', inplace=True, axis=1)
# Convert seed strings (e.g. 'W01') to their numeric rank.
data_dict['WNCAATourneySeeds']['Seed'] = data_dict['WNCAATourneySeeds']['Seed'].apply(lambda x: int(x[1:3]))
# BUG FIX: the original previewed data_dict[fname], but `fname` is never
# defined anywhere in this notebook; preview the frame just modified.
data_dict['WNCAATourneySeeds'].head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Cardinality check on 'SubModel'.  # <count_values>
train_data['SubModel'].value_counts()
# BUG FIX: the original path contained a stray space ('.. /input/...'),
# which does not resolve; it must be '../input/...'.
test = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv')
print(test.shape)
test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# 'SubModel' is too granular — drop it, then inspect 'Color'.  # <count_missing_values>
train_data.drop('SubModel', inplace=True, axis=1)
test_data.drop('SubModel', inplace=True, axis=1)
train_data['Color'].value_counts()
# Split the submission ID ('Season_WTeamID_LTeamID') into its key columns.
test = test.drop(['Pred'], axis=1)
for pos, col in enumerate(['Season', 'WTeamID', 'LTeamID']):
    test[col] = test['ID'].apply(lambda x, p=pos: int(x.split('_')[p]))
test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Fill missing colours with an explicit sentinel, then verify.  # <filter>
train_data['Color'].fillna(value='Color_Unknown', inplace=True)
test_data['Color'].fillna(value='Color_Unknown', inplace=True)
print("Number of null values in Color column "+str(train_data['Color'].isnull().sum()))
print("Number of null values in Color column "+str(test_data['Color'].isnull().sum()))
train_data['Transmission'].value_counts()
# Attach city, season and team-name context to the training games.
gameCities = pd.merge(data_dict['WGameCities'], data_dict['Cities'], how='left', on=['CityID'])
cols_to_use = gameCities.columns.difference(train.columns).tolist() + ["Season", "WTeamID", "LTeamID"]
train = train.merge(gameCities[cols_to_use], how="left", on=["Season", "WTeamID", "LTeamID"])
train.head()
# Season metadata.
cols_to_use = data_dict["WSeasons"].columns.difference(train.columns).tolist() + ["Season"]
train = train.merge(data_dict["WSeasons"][cols_to_use], how="left", on=["Season"])
train.head()
# Team names for winner and loser (suffixes disambiguate the second join).
cols_to_use = data_dict["WTeams"].columns.difference(train.columns).tolist()
train = train.merge(data_dict["WTeams"][cols_to_use], how="left", left_on=["WTeamID"], right_on=["TeamID"])
train.drop(['TeamID'], axis=1, inplace=True)
train = train.merge(data_dict["WTeams"][cols_to_use], how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L'))
train.drop(['TeamID'], axis=1, inplace=True)
print(train.shape)
train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Rows with manual transmission (note mixed 'Manual'/'MANUAL' spellings).  # <count_values>
train_data[train_data['Transmission'] == 'Manual']
# Join tournament seeds for winner and loser (dedup to one seed per team/season).
cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(train.columns).tolist() + ['Season']
seeds = data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"])
train = train.merge(seeds, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'])
train.drop(['TeamID'], axis=1, inplace=True)
train = train.merge(seeds, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L'))
train.drop(['TeamID'], axis=1, inplace=True)
print(train.shape)
train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Normalise the lone 'Manual' spelling to 'MANUAL'.  # <count_values>
train_data['Transmission'].replace("Manual", "MANUAL", inplace=True)
train_data['Transmission'].value_counts()
# Mirror the train-side joins on the test frame: cities, seasons, team names, seeds.
cols_to_use = gameCities.columns.difference(test.columns).tolist() + ["Season", "WTeamID", "LTeamID"]
test = test.merge(gameCities[cols_to_use].drop_duplicates(subset=["Season", "WTeamID", "LTeamID"]), how="left", on=["Season", "WTeamID", "LTeamID"])
del gameCities
gc.collect()
test.head()
cols_to_use = data_dict["WSeasons"].columns.difference(test.columns).tolist() + ["Season"]
test = test.merge(data_dict["WSeasons"][cols_to_use].drop_duplicates(subset=["Season"]), how="left", on=["Season"])
test.head()
cols_to_use = data_dict["WTeams"].columns.difference(test.columns).tolist()
teams = data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"])
test = test.merge(teams, how="left", left_on=["WTeamID"], right_on=["TeamID"])
test.drop(['TeamID'], axis=1, inplace=True)
test = test.merge(teams, how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L'))
test.drop(['TeamID'], axis=1, inplace=True)
test.head()
cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(test.columns).tolist() + ['Season']
seeds = data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"])
test = test.merge(seeds, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'])
test.drop(['TeamID'], axis=1, inplace=True)
test = test.merge(seeds, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L'))
test.drop(['TeamID'], axis=1, inplace=True)
print(test.shape)
test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Sentinel-fill missing transmissions, then inspect 'WheelTypeID'.  # <count_values>
train_data['Transmission'].fillna(value="Transmission_unk", inplace=True)
test_data['Transmission'].fillna(value="Transmission_unk", inplace=True)
train_data['WheelTypeID'].value_counts()
# Drop columns present in train but absent from test (not usable at predict time).
not_exist_in_test = [c for c in train.columns.values.tolist() if c not in test.columns.values.tolist()]
print(not_exist_in_test)
train = train.drop(not_exist_in_test, axis=1)
train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# 'WheelTypeID' duplicates 'WheelType'; drop the numeric variant.  # <count_values>
train_data.drop('WheelTypeID', inplace=True, axis=1)
test_data.drop('WheelTypeID', inplace=True, axis=1)
train_data['WheelType'].value_counts()
# Regular-season points per team: sum/count/variance, split by win and loss side.
team_win_score = regularSeason.groupby(['Season', 'WTeamID']).agg({'WScore': ['sum', 'count', 'var']}).reset_index()
# Flatten the MultiIndex columns to 'WScore sum' style names.
team_win_score.columns = [' '.join(col).strip() for col in team_win_score.columns.values]
team_loss_score = regularSeason.groupby(['Season', 'LTeamID']).agg({'LScore': ['sum', 'count', 'var']}).reset_index()
team_loss_score.columns = [' '.join(col).strip() for col in team_loss_score.columns.values]
del regularSeason
gc.collect()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Distribution of 'WheelType' in the test frame.  # <count_values>
test_data['WheelType'].value_counts()
# Join win/loss season aggregates from both teams' perspectives onto each game.
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID'])
train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID'])
train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID'])
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID'])
# Drop the duplicated join keys produced by the suffixed merges.
train.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Sentinel-fill missing wheel types and re-check the distribution.  # <count_values>
train_data['WheelType'].fillna(value='WheelType_unk', inplace=True)
test_data['WheelType'].fillna(value='WheelType_unk', inplace=True)
train_data['WheelType'].value_counts()
# Same four aggregate joins as the train frame, applied to test.
test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID'])
test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID'])
test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID'])
test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID'])
test.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Cardinality check on 'Nationality'.  # <count_values>
train_data['Nationality'].value_counts()
def preprocess(df):
    """Combine each team's win-side and loss-side season aggregates.

    Pairing convention (from x_score/y_score): team x pairs 'W*_x' with
    'L*_y'; team y pairs 'W*_y' with 'L*_x'.
    """
    df['x_score'] = df['WScore sum_x'] + df['LScore sum_y']
    df['y_score'] = df['WScore sum_y'] + df['LScore sum_x']
    df['x_count'] = df['WScore count_x'] + df['LScore count_y']
    # BUG FIX: the original added 'WScore count_y' + 'WScore count_x',
    # double-counting team x's wins instead of adding team y's losses.
    df['y_count'] = df['WScore count_y'] + df['LScore count_x']
    # BUG FIX: variances now follow the same x/y cross-pairing as the sums
    # and counts (the original paired '_x' with '_x' and '_y' with '_y').
    df['x_var'] = df['WScore var_x'] + df['LScore var_y']
    df['y_var'] = df['WScore var_y'] + df['LScore var_x']
    return df

train = preprocess(train)
test = preprocess(test)
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Cardinality check on 'Nationality' in test.  # <count_values>
test_data['Nationality'].value_counts()
# Build symmetric winner/loser views of each game and align column names.
# NOTE(review): train_los swaps the scores (y_score, x_score) but keeps
# Count_*/Var_* in the x,y order — confirm that asymmetry is intended.
train_win = train.copy()
train_los = train.copy()
train_win = train_win[['Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
train_los = train_los[['Seed_L', 'Seed_W', 'TeamName_L', 'TeamName_W', 'y_score', 'x_score', 'x_count', 'y_count', 'x_var', 'y_var']]
unified = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
train_win.columns = unified
train_los.columns = unified
test = test[['ID', 'Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
test.columns = ['ID'] + unified
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Sentinel-fill nationality, then inspect 'Size'.  # <count_values>
train_data['Nationality'].fillna(value='Nationality_unk', inplace=True)
test_data['Nationality'].fillna(value='Nationality_unk', inplace=True)
train_data['Size'].value_counts()
def feature_engineering(df):
    """Add pairwise difference, per-game-mean and Fano-factor features in place."""
    for base in ('Seed', 'Score', 'Count', 'Var'):
        df[base + '_diff'] = df[base + '_1'] - df[base + '_2']
    df['Mean_score1'] = df['Score_1'] / df['Count_1']
    df['Mean_score2'] = df['Score_2'] / df['Count_2']
    df['Mean_score_diff'] = df['Mean_score1'] - df['Mean_score2']
    # Fano factor = variance / mean, a dispersion measure of scoring.
    df['FanoFactor_1'] = df['Var_1'] / df['Mean_score1']
    df['FanoFactor_2'] = df['Var_2'] / df['Mean_score2']
    return df

train_win = feature_engineering(train_win)
train_los = feature_engineering(train_los)
test = feature_engineering(test)
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Distribution of 'Size' in test.  # <count_values>
test_data['Size'].value_counts()
# Stack the winner and loser views into one training frame.
# NOTE(review): the 'result' target used later is not created in this cell —
# confirm it is added elsewhere (e.g. 1 for win rows, 0 for loss rows).
data = pd.concat((train_win, train_los)).reset_index(drop=True)
print(data.shape)
data.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Sentinel-fill 'Size', then inspect 'TopThreeAmericanName'.  # <count_values>
train_data['Size'].fillna(value='Size_unk', inplace=True)
test_data['Size'].fillna(value="Size_unk", inplace=True)
train_data['TopThreeAmericanName'].value_counts()
categoricals = ["TeamName_1", "TeamName_2"] for c in categoricals: le = LabelEncoder() data[c] = data[c].fillna("NaN") data[c] = le.fit_transform(data[c]) test[c] = le.transform(test[c]) data.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Distribution of 'TopThreeAmericanName' in test.  # <count_values>
test_data['TopThreeAmericanName'].value_counts()
# Model features are every column except the target.
target = 'result'
features = data.columns.values.tolist()
features.remove(target)
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Sentinel-fill 'TopThreeAmericanName', then inspect 'PRIMEUNIT'.  # <count_values>
train_data['TopThreeAmericanName'].fillna(value='Top_unk', inplace=True)
test_data['TopThreeAmericanName'].fillna(value='Top_unk', inplace=True)
train_data['PRIMEUNIT'].value_counts()
# Neural network: 10-fold stratified CV with min-max scaling of numerics.
nn = NeuralNetworkModel(data, test, target, features,
                        categoricals=categoricals, n_splits=10,
                        cv_method="StratifiedKFold", group=None,
                        task="classification", scaler="MinMax", verbose=True)
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Distribution of 'PRIMEUNIT' in test.  # <data_type_conversions>
test_data['PRIMEUNIT'].value_counts()
# LightGBM: 10-fold stratified CV; trees need no feature scaling.
lgbm = LgbModel(data, test, target, features,
                categoricals=categoricals, n_splits=10,
                cv_method="StratifiedKFold", group=None,
                task="classification", scaler=None, verbose=True)
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Clean the remaining sparse columns, drop the MMR price block and the
# purchase date, then report the resulting shape.  # <feature_engineering>
# NOTE(review): 'AGREEN' is normalised only in train and 'ARED' only in
# test — confirm this asymmetry is intended.
train_data['PRIMEUNIT'].fillna(value="Prime_unk", inplace=True)
test_data['PRIMEUNIT'].fillna(value="Prime_unk", inplace=True)
train_data['AUCGUART'].replace("AGREEN", "GREEN", inplace=True)
test_data['AUCGUART'].replace("ARED", "RED", inplace=True)
train_data['AUCGUART'].fillna(value="AUC_unk", inplace=True)
test_data['AUCGUART'].fillna(value="AUC_unk", inplace=True)
mmr_columns = ['MMRAcquisitionAuctionAveragePrice', 'MMRAcquisitionAuctionCleanPrice',
               'MMRAcquisitionRetailAveragePrice', 'MMRAcquisitonRetailCleanPrice',
               'MMRCurrentAuctionAveragePrice', 'MMRCurrentAuctionCleanPrice',
               'MMRCurrentRetailAveragePrice', 'MMRCurrentRetailCleanPrice']
train_data.drop(mmr_columns, inplace=True, axis=1)
test_data.drop(mmr_columns, inplace=True, axis=1)
train_data.drop('PurchDate', axis=1, inplace=True)
test_data.drop("PurchDate", axis=1, inplace=True)
train_data.shape
# CatBoost: 10-fold stratified CV.
catb = CatbModel(data, test, target, features,
                 categoricals=categoricals, n_splits=10,
                 cv_method="StratifiedKFold", group=None,
                 task="classification", scaler=None, verbose=True)
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Scale every numeric (non-object) feature column by its training-set
# maximum so both frames share the same units.  # <drop_column>
feature_frame = train_data.drop(['RefId', 'IsBadBuy'], axis=1)
not_categorical = feature_frame.columns[feature_frame.dtypes != 'object']
for col in not_categorical:
    maximum = np.max(train_data[col])
    train_data[col] = train_data[col] / maximum
    # BUG FIX: the original divided test by the *test* maximum, so the same
    # raw value mapped to different scaled values in train vs. test; use the
    # training maximum for both frames.
    test_data[col] = test_data[col] / maximum
train_data[not_categorical].head()
# BUG FIX: the sample-submission path contained a stray space ('.. /');
# it must be '../' to resolve.
submission_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv')
# Weighted blend of the three models' averaged test predictions.
submission_df['Pred'] = 0.7 * lgbm.y_pred + 0.2 * catb.y_pred + 0.1 * nn.y_pred
submission_df
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,016,864
# Names of the remaining object-typed (categorical) feature columns.  # <filter>
feature_frame = train_data.drop(['RefId', 'IsBadBuy'], axis=1)
categorical = feature_frame.columns[feature_frame.dtypes == 'object']
categorical
# Write the blended predictions for submission.
submission_df.to_csv('submission.csv', index=False)
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
# Peek at the first categorical column.  # <categorify>
train_data[categorical[0]]
# BUG FIX: the original base path contained a stray space ('.. /input/...');
# it must be '../input/...' to resolve.
path = '../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/'
tourney_result = pd.read_csv(path + 'WDataFiles_Stage1/WNCAATourneyCompactResults.csv')
tourney_seed = pd.read_csv(path + 'WDataFiles_Stage1/WNCAATourneySeeds.csv')
season_result = pd.read_csv(path + 'WDataFiles_Stage1/WRegularSeasonCompactResults.csv')
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
# One-hot preview of the first categorical column.  # <categorify>
pd.get_dummies(train_data[categorical[0]])
# Load the stage-1 sample submission as the test frame.
test_df = pd.read_csv(path + 'WSampleSubmissionStage1_2020.csv')
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
# One-hot encode every categorical column in both frames; dummy column
# names are prefixed with the source column name.  # <feature_engineering>
for col in categorical:
    dummies = pd.get_dummies(train_data[col])
    dummies.columns = str(col) + '_' + dummies.columns
    train_data = pd.concat([train_data, dummies], axis=1)
    train_data.drop(col, inplace=True, axis=1)
    dummies = pd.get_dummies(test_data[col])
    dummies.columns = str(col) + '_' + dummies.columns
    test_data = pd.concat([test_data, dummies], axis=1)
    test_data.drop(col, inplace=True, axis=1)
train_data.head()
# Keep only the identifying columns of each tournament game.
tourney_result = tourney_result.drop(['DayNum', 'WScore', 'LScore', 'WLoc', 'NumOT'], axis=1)
tourney_result
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
# Align one-hot columns across frames by zero-filling whatever is missing
# on either side.  # <split>
for col in train_data.drop('IsBadBuy', axis=1).columns:
    if col not in test_data.columns:
        test_data[col] = np.zeros(len(test_data))
for col in test_data.columns:
    if col not in train_data.columns:
        train_data[col] = np.zeros(len(train_data))
train_data.shape
# Attach each team's seed to the game rows: winner first, then loser.
tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
tourney_result.rename(columns={'Seed': 'WSeed'}, inplace=True)
tourney_result = tourney_result.drop('TeamID', axis=1)
tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
tourney_result.rename(columns={'Seed': 'LSeed'}, inplace=True)
tourney_result = tourney_result.drop('TeamID', axis=1)
tourney_result
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
# Hold out a validation split (sklearn default 25%) from the engineered features.  # <train_model>
X = train_data.drop(['RefId', 'IsBadBuy'], axis=1)
y = train_data['IsBadBuy']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
def get_seed(x):
    """Return the numeric part of a seed label, e.g. 'W01' -> 1 (chars 1..2)."""
    return int(x[1:3])

# Series.map accepts the function directly; identical to the original lambdas.
tourney_result['WSeed'] = tourney_result['WSeed'].map(get_seed)
tourney_result['LSeed'] = tourney_result['LSeed'].map(get_seed)
tourney_result
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
KNN=KNeighborsClassifier(n_neighbors=11) KNN.fit(X_train,y_train )<compute_test_metric>
# Stack winner rows and loser rows into one long (Season, TeamID, Score) table.
# FIX: the column selections are views of season_result, so renaming them with
# inplace=True triggered pandas SettingWithCopyWarning; .copy() makes the
# renames operate on independent frames. The resulting table is unchanged.
season_win_result = season_result[['Season', 'WTeamID', 'WScore']].copy()
season_lose_result = season_result[['Season', 'LTeamID', 'LScore']].copy()
season_win_result.rename(columns={'WTeamID': 'TeamID', 'WScore': 'Score'}, inplace=True)
season_lose_result.rename(columns={'LTeamID': 'TeamID', 'LScore': 'Score'}, inplace=True)
season_result = pd.concat((season_win_result, season_lose_result)).reset_index(drop=True)
season_result
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
KNN.score(X_test,y_test )<predict_on_test>
season_score = season_result.groupby(['Season', 'TeamID'])['Score'].sum().reset_index() season_score
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
predict=KNN.predict(test_data.drop('RefId',axis=1)) Submission=pd.DataFrame(data=predict,columns=['IsBadBuy']) Submission.head() <save_to_csv>
tourney_result = pd.merge(tourney_result, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Score':'WScoreT'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result = pd.merge(tourney_result, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Score':'LScoreT'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
Submission['RefId']=test_data['RefId'] Submission.set_index('RefId',inplace=True) Submission.head() Submission.to_csv('Submission.csv') <load_from_zip>
tourney_win_result = tourney_result.drop(['Season', 'WTeamID', 'LTeamID'], axis=1) tourney_win_result.rename(columns={'WSeed':'Seed1', 'LSeed':'Seed2', 'WScoreT':'ScoreT1', 'LScoreT':'ScoreT2'}, inplace=True) tourney_win_result
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
! tar xf.. /input/bird-songs-pad-and-resize-spectrogram/spectrograms_resized.tar.bz2<set_options>
tourney_lose_result = tourney_win_result.copy() tourney_lose_result['Seed1'] = tourney_win_result['Seed2'] tourney_lose_result['Seed2'] = tourney_win_result['Seed1'] tourney_lose_result['ScoreT1'] = tourney_win_result['ScoreT2'] tourney_lose_result['ScoreT2'] = tourney_win_result['ScoreT1'] tourney_lose_result
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
%reload_ext autoreload %autoreload 2 %matplotlib inline <define_variables>
tourney_win_result['Seed_diff'] = tourney_win_result['Seed1'] - tourney_win_result['Seed2'] tourney_win_result['ScoreT_diff'] = tourney_win_result['ScoreT1'] - tourney_win_result['ScoreT2'] tourney_lose_result['Seed_diff'] = tourney_lose_result['Seed1'] - tourney_lose_result['Seed2'] tourney_lose_result['ScoreT_diff'] = tourney_lose_result['ScoreT1'] - tourney_lose_result['ScoreT2']
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
data_dir = Path('.. /input') label_dir = data_dir/'multilabel-bird-species-classification-nips2013/nips4b_bird_challenge_train_labels/NIPS4B_BIRD_CHALLENGE_TRAIN_LABELS' spect_dir = Path('./spectrograms_resized' )<load_from_csv>
test_df['Season'] = test_df['ID'].map(lambda x: int(x[:4])) test_df['WTeamID'] = test_df['ID'].map(lambda x: int(x[5:9])) test_df['LTeamID'] = test_df['ID'].map(lambda x: int(x[10:14])) test_df
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
df = pd.read_csv(label_dir/'nips4b_birdchallenge_train_labels.csv', skiprows=[0, 2]) df.tail()<data_type_conversions>
# Build the same features for the submission rows: left-merge tournament seeds
# for both team ids (Seed1/Seed2), then merge the season score totals for both
# team ids (ScoreT1/ScoreT2), dropping the helper TeamID column each time.
test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Score':'ScoreT1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Score':'ScoreT2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
df.drop(df.columns[[1, 3]],axis=1,inplace=True) df.rename(columns={df.columns[0]:'file', df.columns[1]:'EMPTY'}, inplace=True) df = df[:-1] df.fillna(0, inplace=True) df = df.astype('int32', errors='ignore') df['file'] = df['file'].apply(lambda fn: str(Path(fn ).with_suffix(''))) df.tail()<normalization>
test_df['Seed1'] = test_df['Seed1'].map(lambda x: get_seed(x)) test_df['Seed2'] = test_df['Seed2'].map(lambda x: get_seed(x)) test_df['Seed_diff'] = test_df['Seed1'] - test_df['Seed2'] test_df['ScoreT_diff'] = test_df['ScoreT1'] - test_df['ScoreT2'] test_df = test_df.drop(['ID', 'Pred', 'Season', 'WTeamID', 'LTeamID'], axis=1) test_df
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
tfms = get_transforms(do_flip=False, max_rotate=None, max_warp=None) data =(src.transform(tfms, size=128) .databunch(num_workers=0 ).normalize(imagenet_stats))<define_variables>
tourney_win_result['result'] = 1 tourney_lose_result['result'] = 0 tourney_result = pd.concat(( tourney_win_result, tourney_lose_result)).reset_index(drop=True) tourney_result
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
data.show_batch(rows=3, figsize=(12,9), ds_type=DatasetType.Valid )<choose_model_class>
X_train = tourney_result.drop('result', axis=1) y_train = tourney_result.result
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
arch = models.resnet50 acc_02 = partial(accuracy_thresh, thresh=0.2) learn = cnn_learner(data, arch, metrics=acc_02, path='.' )<train_model>
from sklearn.linear_model import LogisticRegression import matplotlib.pyplot as plt from sklearn.utils import shuffle from sklearn.model_selection import GridSearchCV from sklearn.model_selection import KFold import lightgbm as lgb import xgboost as xgb from xgboost import XGBClassifier import gc
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
lr = 2.29E-02 learn.fit_one_cycle(5, slice(lr))<save_model>
# Hyper-parameter dictionaries for the two boosted-tree models.
# NOTE(review): params_lgb uses "metric": 'logloss' — LightGBM's binary logloss
# metric is canonically spelled 'binary_logloss'; confirm this alias resolves
# rather than silently disabling the eval metric.
# NOTE(review): params_xgb pairs a very small learning_rate (0.0004) with
# n_estimators=5000 — presumably tuned elsewhere; verify before reuse.
params_lgb = {'num_leaves': 400, 'min_child_weight': 0.034, 'feature_fraction': 0.379, 'bagging_fraction': 0.418, 'min_data_in_leaf': 106, 'objective': 'binary', 'max_depth': 50, 'learning_rate': 0.0068, "boosting_type": "gbdt", "bagging_seed": 11, "metric": 'logloss', "verbosity": -1, 'reg_alpha': 0.3899, 'reg_lambda': 0.648, 'random_state': 47, } params_xgb = {'colsample_bytree': 0.8, 'learning_rate': 0.0004, 'max_depth': 31, 'subsample': 1, 'objective':'binary:logistic', 'eval_metric':'logloss', 'min_child_weight':3, 'gamma':0.25, 'n_estimators':5000 }
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
learn.save('stage-1-rn50', return_path=True )<train_model>
# Build the cross-validation splitter.
# NOTE(review): 200 folds means 200 models, each validated on a sliver of the
# data — presumably intentional bagging-via-CV, but verify the cost is wanted.
# KFold defaults to shuffle=False here, so folds are contiguous row slices.
NFOLDS = 200 folds = KFold(n_splits=NFOLDS) columns = X_train.columns splits = folds.split(X_train, y_train )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
learn.fit_one_cycle(5, slice(3.02E-03, lr/5))<save_model>
y_preds_lgb = np.zeros(test_df.shape[0]) y_oof_lgb = np.zeros(X_train.shape[0] )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
learn.save('stage-2-rn50' )<categorify>
# Train one LightGBM model per fold: out-of-fold predictions are stored in
# y_oof_lgb at the validation indices, and each fold's test-set predictions are
# averaged into y_preds_lgb (divided by NFOLDS). The per-fold frames are
# deleted and garbage-collected at the end of each iteration to bound memory.
for fold_n,(train_index, valid_index)in enumerate(splits): print('Fold:',fold_n+1) X_train1, X_valid1 = X_train[columns].iloc[train_index], X_train[columns].iloc[valid_index] y_train1, y_valid1 = y_train.iloc[train_index], y_train.iloc[valid_index] dtrain = lgb.Dataset(X_train1, label=y_train1) dvalid = lgb.Dataset(X_valid1, label=y_valid1) clf = lgb.train(params_lgb, dtrain, 10000, valid_sets = [dtrain, dvalid], verbose_eval=200) y_pred_valid = clf.predict(X_valid1) y_oof_lgb[valid_index] = y_pred_valid y_preds_lgb += clf.predict(test_df)/ NFOLDS del X_train1, X_valid1, y_train1, y_valid1 gc.collect()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
data_256 =(src.transform(tfms, size=256) .databunch(num_workers=0 ).normalize(imagenet_stats)) learn.data = data_256 learn.data.train_ds[0][0].shape<train_model>
# BUG FIX: the original `del X_train2, X_valid2, y_train2, y_valid2` referenced
# names that are never created anywhere in this notebook (the CV loop above
# names its per-fold frames X_train1/X_valid1/... and already deletes them each
# iteration), so this cell raised NameError. Only the garbage collection stays.
gc.collect()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
lr = 5E-03 learn.fit_one_cycle(5, slice(lr))<save_model>
submission_df = pd.read_csv(path + 'WSampleSubmissionStage1_2020.csv') submission_df['Pred'] = y_preds_lgb submission_df
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,009,069
learn.save('stage-1-256-rn50' )<train_model>
submission_df.to_csv('submission.csv', index=False )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
learn.fit_one_cycle(5, slice(1.58E-06, lr/5))<save_model>
import pandas as pd import numpy as np from sklearn.linear_model import LogisticRegression import matplotlib.pyplot as plt from sklearn.utils import shuffle from sklearn.model_selection import GridSearchCV from sklearn.model_selection import KFold import lightgbm as lgb import xgboost as xgb from xgboost import XGBClassifier import gc import matplotlib.pyplot as plt from sklearn import preprocessing import pymc3 as pm import matplotlib.pyplot as plt import seaborn as sns
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
learn.save('stage-2-256-rn50' )<train_model>
Tourney_Compact_Results = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/WNCAATourneyCompactResults.csv') Tourney_Seeds = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/WNCAATourneySeeds.csv' )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
learn.fit_one_cycle(5, slice(1.58E-06, lr/5))<save_model>
RegularSeason_Compact_Results = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/WRegularSeasonCompactResults.csv') MSeasons = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/WSeasons.csv') MTeams=pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/WTeams.csv' )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
learn.save('stage-2-256-rn50-10e' )<define_variables>
Tourney_Results_Compact=pd.merge(Tourney_Compact_Results, Tourney_Seeds, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') Tourney_Results_Compact.rename(columns={'Seed':'WinningSeed'},inplace=True) Tourney_Results_Compact=Tourney_Results_Compact.drop(['TeamID'],axis=1) Tourney_Results_Compact = pd.merge(Tourney_Results_Compact, Tourney_Seeds, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') Tourney_Results_Compact.rename(columns={'Seed':'LoosingSeed'}, inplace=True) Tourney_Results_Compact=Tourney_Results_Compact.drop(['TeamID','NumOT','WLoc'],axis=1) Tourney_Results_Compact
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
test = ImageList.from_folder(spect_dir/'test/') len(test )<predict_on_test>
Tourney_Results_Compact=Tourney_Results_Compact.drop(['WScore','LScore'],axis=1) Tourney_Results_Compact.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
predictor = load_learner('.', test=test, num_workers=0) preds, _ = predictor.get_preds(ds_type=DatasetType.Test) fpreds = preds[:, 1:].reshape(-1, )<define_variables>
# Strip the region letter from the seed labels ('W01' -> '01') and coerce the
# result to numeric (NaN on failure).
# FIX: the regex patterns are now raw strings; '(\d+)' in a plain string is an
# invalid escape sequence (DeprecationWarning / future SyntaxError). Matching
# behavior is identical.
Tourney_Results_Compact['WinningSeed'] = Tourney_Results_Compact['WinningSeed'].str.extract(r'(\d+)', expand=True)
Tourney_Results_Compact['LoosingSeed'] = Tourney_Results_Compact['LoosingSeed'].str.extract(r'(\d+)', expand=True)
Tourney_Results_Compact.WinningSeed = pd.to_numeric(Tourney_Results_Compact.WinningSeed, errors='coerce')
Tourney_Results_Compact.LoosingSeed = pd.to_numeric(Tourney_Results_Compact.LoosingSeed, errors='coerce')
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
names = [f.stem for f in predictor.data.test_ds.items] fnames = [x + '.wav_classnumber_' + str(i)for x in names for i in range(1, len(data.classes)) ]<save_to_csv>
# Stack winner and loser rows of the regular season into one long
# (Season, TeamID, Score) table.
# FIX: the column selections are views, so renaming them with inplace=True
# triggered pandas SettingWithCopyWarning; .copy() gives the renames real
# frames to work on. Output is unchanged.
season_winning_team = RegularSeason_Compact_Results[['Season', 'WTeamID', 'WScore']].copy()
season_losing_team = RegularSeason_Compact_Results[['Season', 'LTeamID', 'LScore']].copy()
season_winning_team.rename(columns={'WTeamID': 'TeamID', 'WScore': 'Score'}, inplace=True)
season_losing_team.rename(columns={'LTeamID': 'TeamID', 'LScore': 'Score'}, inplace=True)
RegularSeason_Compact_Results = pd.concat((season_winning_team, season_losing_team)).reset_index(drop=True)
RegularSeason_Compact_Results
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
test_df = pd.DataFrame({'ID':fnames, 'Probability':fpreds}, columns=['ID', 'Probability']) test_df.to_csv('submission.csv', index=False )<import_modules>
RegularSeason_Compact_Results_Final = RegularSeason_Compact_Results.groupby(['Season', 'TeamID'])['Score'].sum().reset_index() RegularSeason_Compact_Results_Final
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
import os import sys import operator import numpy as np import pandas as pd from scipy import sparse import xgboost as xgb from sklearn import model_selection, preprocessing, ensemble from sklearn.metrics import log_loss from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer <train_model>
Tourney_Results_Compact = pd.merge(Tourney_Results_Compact, RegularSeason_Compact_Results_Final, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') Tourney_Results_Compact.rename(columns={'Score':'WScoreTotal'}, inplace=True) Tourney_Results_Compact
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
def runXGB(train_X, train_y, test_X, test_y=None, max_depth=6, feature_names=None, seed_val=0, num_rounds=1000):
    """Train a 3-class softprob XGBoost model and return (test predictions, booster).

    When test_y is provided it is used as a watchlist and training early-stops
    after 20 stagnant rounds; otherwise training runs the full num_rounds.
    """
    param = {
        'objective': 'multi:softprob',
        'eta': 0.15,
        'max_depth': max_depth,
        'silent': 1,
        'num_class': 3,
        'eval_metric': "mlogloss",
        'min_child_weight': 1,
        'subsample': 0.7,
        'colsample_bytree': 0.7,
        'seed': seed_val,
    }
    plst = list(param.items())
    xgtrain = xgb.DMatrix(train_X, label=train_y)
    if test_y is not None:
        # Labeled holdout available: monitor it for early stopping.
        xgtest = xgb.DMatrix(test_X, label=test_y)
        watchlist = [(xgtrain, 'train'), (xgtest, 'test')]
        model = xgb.train(plst, xgtrain, num_rounds, watchlist, early_stopping_rounds=20)
    else:
        xgtest = xgb.DMatrix(test_X)
        model = xgb.train(plst, xgtrain, num_rounds)
    pred_test_y = model.predict(xgtest)
    return pred_test_y, model
Tourney_Results_Compact = Tourney_Results_Compact.drop('TeamID', axis=1) Tourney_Results_Compact = pd.merge(Tourney_Results_Compact, RegularSeason_Compact_Results_Final, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') Tourney_Results_Compact.rename(columns={'Score':'LScoreTotal'}, inplace=True) Tourney_Results_Compact = Tourney_Results_Compact.drop('TeamID', axis=1) Tourney_Results_Compact=Tourney_Results_Compact[Tourney_Results_Compact['Season'] < 2016] Tourney_Results_Compact
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
data_path = ".. /input/" train_file = data_path + "train.json" test_file = data_path + "test.json" train_df = pd.read_json(train_file) test_df = pd.read_json(test_file) print(train_df.shape) print(test_df.shape) <define_variables>
Tourney_Win_Results=Tourney_Results_Compact.drop(['Season','WTeamID','LTeamID','DayNum'],axis=1) Tourney_Win_Results
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
features_to_use = ["bathrooms", "bedrooms", "latitude", "longitude", "price"] <feature_engineering>
Tourney_Win_Results.rename(columns={'WinningSeed':'Seed1', 'LoosingSeed':'Seed2', 'WScoreTotal':'ScoreT1', 'LScoreTotal':'ScoreT2'}, inplace=True )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
train_df["num_photos"] = train_df["photos"].apply(len) test_df["num_photos"] = test_df["photos"].apply(len) train_df["num_features"] = train_df["features"].apply(len) test_df["num_features"] = test_df["features"].apply(len) train_df["num_description_words"] = train_df["description"].apply(lambda x: len(x.split(" "))) test_df["num_description_words"] = test_df["description"].apply(lambda x: len(x.split(" "))) train_df["created"] = pd.to_datetime(train_df["created"]) test_df["created"] = pd.to_datetime(test_df["created"]) train_df["created_year"] = train_df["created"].dt.year test_df["created_year"] = test_df["created"].dt.year train_df["created_month"] = train_df["created"].dt.month test_df["created_month"] = test_df["created"].dt.month train_df["created_day"] = train_df["created"].dt.day test_df["created_day"] = test_df["created"].dt.day train_df["created_hour"] = train_df["created"].dt.hour test_df["created_hour"] = test_df["created"].dt.hour features_to_use.extend(["num_photos", "num_features", "num_description_words","created_year", "created_month", "created_day", "listing_id", "created_hour"]) <categorify>
tourney_lose_result = Tourney_Win_Results.copy() tourney_lose_result['Seed1'] = Tourney_Win_Results['Seed2'] tourney_lose_result['Seed2'] = Tourney_Win_Results['Seed1'] tourney_lose_result['ScoreT1'] = Tourney_Win_Results['ScoreT2'] tourney_lose_result['ScoreT2'] = Tourney_Win_Results['ScoreT1'] tourney_lose_result
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
categorical = ["display_address", "manager_id", "building_id", "street_address"] for f in categorical: if train_df[f].dtype=='object': lbl = preprocessing.LabelEncoder() lbl.fit(list(train_df[f].values)+ list(test_df[f].values)) train_df[f] = lbl.transform(list(train_df[f].values)) test_df[f] = lbl.transform(list(test_df[f].values)) features_to_use.append(f) <feature_engineering>
Tourney_Win_Results['Seed_diff'] = Tourney_Win_Results['Seed1'] - Tourney_Win_Results['Seed2'] Tourney_Win_Results['ScoreT_diff'] = Tourney_Win_Results['ScoreT1'] - Tourney_Win_Results['ScoreT2'] tourney_lose_result['Seed_diff'] = tourney_lose_result['Seed1'] - tourney_lose_result['Seed2'] tourney_lose_result['ScoreT_diff'] = tourney_lose_result['ScoreT1'] - tourney_lose_result['ScoreT2']
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
train_df['features'] = train_df["features"].apply(lambda x: " ".join(["_".join(i.split(" ")) for i in x])) test_df['features'] = test_df["features"].apply(lambda x: " ".join(["_".join(i.split(" ")) for i in x])) print(train_df["features"].head()) tfidf = CountVectorizer(stop_words='english', max_features=200) tr_sparse = tfidf.fit_transform(train_df["features"]) te_sparse = tfidf.transform(test_df["features"]) <prepare_x_and_y>
Tourney_Win_Results['result'] = 1 tourney_lose_result['result'] = 0 tourney_result_Final = pd.concat(( Tourney_Win_Results, tourney_lose_result)).reset_index(drop=True) tourney_result_Final.to_csv('Tourneyvalidate.csv', index=False )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
train_X = sparse.hstack([train_df[features_to_use], tr_sparse] ).tocsr() test_X = sparse.hstack([test_df[features_to_use], te_sparse] ).tocsr() target_num_map = {'high':0, 'medium':1, 'low':2} train_y = np.array(train_df['interest_level'].apply(lambda x: target_num_map[x])) print(train_X.shape, test_X.shape) <compute_train_metric>
tourney_result_Final1 = tourney_result_Final[[ 'Seed1', 'Seed2', 'ScoreT1', 'ScoreT2', 'Seed_diff', 'ScoreT_diff', 'result']]
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
cv_scores = [] kf = model_selection.KFold(n_splits=5, shuffle=True, random_state=2016) for max_depth in [7,8]: for dev_index, val_index in kf.split(range(train_X.shape[0])) : dev_X, val_X = train_X[dev_index,:], train_X[val_index,:] dev_y, val_y = train_y[dev_index], train_y[val_index] preds, model = runXGB(dev_X, dev_y, val_X, val_y,max_depth) cv_scores.append(log_loss(val_y, preds)) print(cv_scores) break <save_to_csv>
# Hard override: any matchup where team 1 is a 14-16 seed and team 2 is a 1-3
# seed is forced to result=0 (team 1 loses), regardless of the other features.
# NOTE(review): this rewrites the training labels themselves — presumably to
# encode "huge upsets never happen"; confirm this is intentional.
tourney_result_Final1.loc[lambda x:(x['Seed1'].isin([14,15,16])) &(x['Seed2'].isin([1,2,3])) ,'result' ] = 0
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
<set_options>
test_df = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv' )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
warnings.simplefilter(action='ignore', category=FutureWarning) gc.enable()<load_from_csv>
test_df['Season'] = test_df['ID'].map(lambda x: int(x[:4])) test_df['WTeamID'] = test_df['ID'].map(lambda x: int(x[5:9])) test_df['LTeamID'] = test_df['ID'].map(lambda x: int(x[10:14])) test_df
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
train = pd.read_csv('.. /input/train.csv') test = pd.read_csv('.. /input/test.csv' )<load_from_csv>
test_df = pd.merge(test_df, Tourney_Seeds, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, Tourney_Seeds, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1 )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
train = pd.read_csv('.. /input/train.csv') test = pd.read_csv('.. /input/test.csv' )<data_type_conversions>
test_df = pd.merge(test_df, RegularSeason_Compact_Results_Final, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Score':'ScoreT1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, RegularSeason_Compact_Results_Final, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Score':'ScoreT2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df test_df.to_csv('test_df_Test.csv', index=False )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
def pre_pro(df):
    """Sanitize a numeric frame in three sequential passes per column.

    Casts everything to float32, then for each column replaces -inf with the
    minimum of the remaining values and +inf with the maximum of the remaining
    values (the -inf replacement happens first, so it feeds into the max), and
    finally zero-fills any NaNs. Returns the cleaned frame.
    """
    df = df.astype('float32')
    for name in df.columns:
        low = df.loc[df[name] != -np.inf, name].min()
        df[name].replace(-np.inf, low, inplace=True)
        high = df.loc[df[name] != np.inf, name].max()
        df[name].replace(np.inf, high, inplace=True)
    df.fillna(0, inplace=True)
    return df
test_df['Seed1'] = test_df['Seed1'].str.extract('(\d+)', expand=True) test_df['Seed2'] = test_df['Seed2'].str.extract('(\d+)', expand=True) test_df.Seed1 = pd.to_numeric(test_df.Seed1, errors='coerce') test_df.Seed2 = pd.to_numeric(test_df.Seed2, errors='coerce' )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
def feat_eng(df):
    """Add pairwise comparison and ratio features for the A-vs-B influencer task.

    Boolean *_diff columns mark whether A's raw count exceeds B's; the *_ratio
    columns are within-user ratios (popularity, follower, mention, retweet) plus
    cross-user network-feature ratios. Finishes by clamping inf/NaN via pre_pro().
    """
    # BUG FIX: the original called df.replace(0, 0.001) without assigning the
    # result, so it was a no-op and zeros survived into the divisions below,
    # producing inf/NaN far more often than intended. The guard is now applied.
    df = df.replace(0, 0.001)
    # Head-to-head comparisons of the raw activity counts.
    df['follower_diff'] =(df['A_follower_count'] > df['B_follower_count'])
    df['following_diff'] =(df['A_following_count'] > df['B_following_count'])
    df['listed_diff'] =(df['A_listed_count'] > df['B_listed_count'])
    df['ment_rec_diff'] =(df['A_mentions_received'] > df['B_mentions_received'])
    df['rt_rec_diff'] =(df['A_retweets_received'] > df['B_retweets_received'])
    df['ment_sent_diff'] =(df['A_mentions_sent'] > df['B_mentions_sent'])
    df['rt_sent_diff'] =(df['A_retweets_sent'] > df['B_retweets_sent'])
    df['posts_diff'] =(df['A_posts'] > df['B_posts'])
    # Within-user ratios for user A.
    df['A_pop_ratio'] = df['A_mentions_sent']/df['A_listed_count']
    df['A_foll_ratio'] = df['A_follower_count']/df['A_following_count']
    df['A_ment_ratio'] = df['A_mentions_sent']/df['A_mentions_received']
    df['A_rt_ratio'] = df['A_retweets_sent']/df['A_retweets_received']
    # Within-user ratios for user B.
    df['B_pop_ratio'] = df['B_mentions_sent']/df['B_listed_count']
    df['B_foll_ratio'] = df['B_follower_count']/df['B_following_count']
    df['B_ment_ratio'] = df['B_mentions_sent']/df['B_mentions_received']
    df['B_rt_ratio'] = df['B_retweets_sent']/df['B_retweets_received']
    # Ratio-vs-ratio comparisons.
    df['A/B_foll_ratio'] =(df['A_foll_ratio'] > df['B_foll_ratio'])
    df['A/B_ment_ratio'] =(df['A_ment_ratio'] > df['B_ment_ratio'])
    df['A/B_rt_ratio'] =(df['A_rt_ratio'] > df['B_rt_ratio'])
    # Network-feature comparisons and cross-user ratios.
    df['nf1_diff'] =(df['A_network_feature_1'] > df['B_network_feature_1'])
    df['nf2_diff'] =(df['A_network_feature_2'] > df['B_network_feature_2'])
    df['nf3_diff'] =(df['A_network_feature_3'] > df['B_network_feature_3'])
    df['nf3_ratio'] = df['A_network_feature_3'] / df['B_network_feature_3']
    df['nf2_ratio'] = df['A_network_feature_2'] / df['B_network_feature_2']
    df['nf1_ratio'] = df['A_network_feature_1'] / df['B_network_feature_1']
    return(pre_pro(df))
test_df['Seed_diff'] = test_df['Seed1'] - test_df['Seed2'] test_df['ScoreT_diff'] = test_df['ScoreT1'] - test_df['ScoreT2'] test_df = test_df.drop(['ID', 'Pred', 'Season', 'WTeamID', 'LTeamID'], axis=1) test_df
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
fe_train = feat_eng(train.copy()) fe_test = feat_eng(test.copy() )<prepare_x_and_y>
X = tourney_result_Final1.drop('result', axis=1) y = tourney_result_Final1.result
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
train_df = fe_train test_df = fe_test y_train = np.array(train_df['Choice'] )<define_variables>
# Min-max scale train and test features together: concatenate, fit-transform
# one scaler over the combined frame, then slice back into train_log/test_log.
# NOTE(review): fitting the scaler on train+test leaks test-set statistics into
# the transform — common in Kaggle kernels, but confirm it is acceptable here.
df = pd.concat([X, test_df], axis=0, sort=False ).reset_index(drop=True) df_log = pd.DataFrame( preprocessing.MinMaxScaler().fit_transform(df), columns=df.columns, index=df.index ) train_log, test_log = df_log.iloc[:len(X),:], df_log.iloc[len(X):,:].reset_index(drop=True )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
target = 'Choice' predictors = train_df.columns.values.tolist() [1:]<init_hyperparams>
# Fit the baseline logistic regression and tabulate its coefficients by feature.
logreg = LogisticRegression()
logreg.fit(train_log, y)
# BUG FIX: the original built the name column from train_log.columns.delete(0),
# which dropped the first feature name while logreg.coef_[0] has one entry per
# column — every coefficient was paired with the wrong feature and the last one
# was silently lost. Use the full column list so names and scores align.
coeff_logreg = pd.DataFrame(train_log.columns)
coeff_logreg.columns = ['feature']
coeff_logreg["score_logreg"] = pd.Series(logreg.coef_[0])
coeff_logreg.sort_values(by='score_logreg', ascending=False)
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
param_lgb = { 'feature_fraction': 0.4647875434283183, 'lambda_l1': 0.14487098904632512, 'lambda_l2': 0.9546002933329684, 'learning_rate': 0.050592093295320606, 'max_depth': int(round(7.696194993998026)) , 'min_data_in_leaf': int(round(9.879507661608065)) , 'min_gain_to_split': 0.7998292013880356, 'min_sum_hessian_in_leaf': 0.24962103361366683, 'num_leaves': int(round(2.854239951949671)) , 'max_bin': 63, 'bagging_fraction': 1.0, 'bagging_freq': 5, 'save_binary': True, 'seed': 1965, 'feature_fraction_seed': 1965, 'bagging_seed': 1965, 'drop_seed': 1965, 'data_random_seed': 1965, 'objective': 'binary', 'boosting_type': 'gbdt', 'verbose': 1, 'metric': 'auc', 'is_unbalance': True, 'boost_from_average': False}<split>
y_logreg_train = logreg.predict(train_log) y_logreg_pred = logreg.predict_proba(test_log )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
nfold = 20 skf = StratifiedKFold(n_splits=nfold, shuffle=True, random_state=2019) oof = np.zeros(len(fe_train)) predictions = np.zeros(( len(fe_test),nfold)) i = 1 for train_index, valid_index in skf.split(fe_train, fe_train.Choice.values): print(" fold {}".format(i)) xg_train = lgb.Dataset(fe_train.iloc[train_index][predictors].values, label=fe_train.iloc[train_index][target].values, feature_name=predictors, free_raw_data = False ) xg_valid = lgb.Dataset(fe_train.iloc[valid_index][predictors].values, label=fe_train.iloc[valid_index][target].values, feature_name=predictors, free_raw_data = False ) clf = lgb.train(param_lgb, xg_train, 10000000, valid_sets = [xg_valid], verbose_eval=250, early_stopping_rounds = 100) oof[valid_index] = clf.predict(fe_train.iloc[valid_index][predictors].values, num_iteration=clf.best_iteration) predictions[:,i-1] += clf.predict(fe_test[predictors], num_iteration=clf.best_iteration) i = i + 1 print(" CV AUC: {:<0.8f}".format(metrics.roc_auc_score(fe_train.Choice.values, oof)) )<groupby>
clf = RandomForestClassifier(n_estimators=200,max_depth=50) clf.fit(train_log, y) clf_probs = clf.predict_proba(test_log )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
lgb_bay = [] for i in range(len(predictions)) : lgb_bay.append(predictions[i][-1] )<save_to_csv>
y_pred_df_random = pd.DataFrame(clf_probs) y_pred_1 = y_pred_df_random.iloc[:,[1]] y_pred_df_random
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
submission = pd.read_csv('.. /input/sample_predictions.csv') submission['Choice'] = lgb_bay submission.to_csv('sub.csv', index = False, header = True )<import_modules>
submission_df = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv') submission_df['Pred'] = y_pred_1 submission_df
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,249,926
<set_options><EOS>
submission_df.to_csv('submission_New8.csv', index=False )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
<SOS> metric: logloss Kaggle data source: google-cloud-ncaa-march-madness-2020-division-1-womens-tournament<load_from_csv>
pd.set_option('max_columns', None) plt.style.use('fivethirtyeight') %matplotlib inline py.init_notebook_mode(connected=True) warnings.filterwarnings('ignore') print("Libraries imported!" )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
training_data_original = pd.read_csv('/kaggle/input/inputs/train.csv') training_data_original['Date'] = pd.to_datetime(training_data_original['Date']) training_data = training_data_original[training_data_original['Country_Region'] != 'Diamond Princess'] countries = training_data['Country_Region'].unique() countries_main = ['China', 'US', 'Australia', 'Canada'] states = training_data['Province_State'].unique()<load_from_csv>
class BaseModel(object):
    """Abstract cross-validation training harness shared by the concrete
    model wrappers (LgbModel, CatbModel, NeuralNetworkModel).

    Subclasses must implement train_model / get_params / convert_dataset.
    Fitting happens eagerly in __init__, which stores test predictions,
    OOF predictions, the CV score and per-fold feature importances.
    """

    def __init__(self, train_df, test_df, target, features, categoricals=[],
                 n_splits=3, cv_method="KFold", group=None, task="regression",
                 parameter_tuning=False, scaler=None, verbose=True):
        self.train_df = train_df
        self.test_df = test_df
        self.target = target
        self.features = features
        self.n_splits = n_splits
        self.categoricals = categoricals
        self.cv_method = cv_method
        self.group = group
        self.task = task
        self.parameter_tuning = parameter_tuning
        self.scaler = scaler
        self.cv = self.get_cv()
        self.verbose = verbose
        self.params = self.get_params()
        self.y_pred, self.score, self.model, self.oof, self.y_val, self.fi_df = self.fit()

    def train_model(self, train_set, val_set):
        raise NotImplementedError

    def get_params(self):
        raise NotImplementedError

    def convert_dataset(self, x_train, y_train, x_val, y_val):
        raise NotImplementedError

    def convert_x(self, x):
        # Hook for subclasses that need a different prediction-time layout.
        return x

    def calc_metric(self, y_true, y_pred):
        # Log-loss for classification, RMSE for regression.
        if self.task == "classification":
            return log_loss(y_true, y_pred)
        elif self.task == "regression":
            return np.sqrt(mean_squared_error(y_true, y_pred))

    def get_cv(self):
        """Return a split iterator for the configured CV strategy."""
        if self.cv_method == "KFold":
            cv = KFold(n_splits=self.n_splits, shuffle=True, random_state=42)
            return cv.split(self.train_df)
        elif self.cv_method == "StratifiedKFold":
            cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=42)
            return cv.split(self.train_df, self.train_df[self.target])
        elif self.cv_method == "TimeSeriesSplit":
            cv = TimeSeriesSplit(max_train_size=None, n_splits=self.n_splits)
            return cv.split(self.train_df)
        elif self.cv_method == "GroupKFold":
            # Fix: sklearn's GroupKFold constructor takes only n_splits;
            # the original passed shuffle/random_state, raising TypeError.
            cv = GroupKFold(n_splits=self.n_splits)
            return cv.split(self.train_df, self.train_df[self.target], self.group)
        elif self.cv_method == "StratifiedGroupKFold":
            cv = StratifiedGroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42)
            return cv.split(self.train_df, self.train_df[self.target], self.group)

    def fit(self):
        """Run the CV loop; return (y_pred, score, model, oof, y_vals, fi_df)."""
        oof_pred = np.zeros((self.train_df.shape[0],))
        y_vals = np.zeros((self.train_df.shape[0],))
        y_pred = np.zeros((self.test_df.shape[0],))
        # The group column must not be used as a training feature.
        if self.group is not None:
            if self.group in self.features:
                self.features.remove(self.group)
            if self.group in self.categoricals:
                self.categoricals.remove(self.group)
        fi = np.zeros((self.n_splits, len(self.features)))
        if self.scaler is not None:
            # Impute, then scale numerics; categoricals filled with the mode.
            numerical_features = [f for f in self.features if f not in self.categoricals]
            self.train_df[numerical_features] = self.train_df[numerical_features].fillna(self.train_df[numerical_features].median())
            self.test_df[numerical_features] = self.test_df[numerical_features].fillna(self.test_df[numerical_features].median())
            self.train_df[self.categoricals] = self.train_df[self.categoricals].fillna(self.train_df[self.categoricals].mode().iloc[0])
            self.test_df[self.categoricals] = self.test_df[self.categoricals].fillna(self.test_df[self.categoricals].mode().iloc[0])
            if self.scaler == "MinMax":
                scaler = MinMaxScaler()
            elif self.scaler == "Standard":
                scaler = StandardScaler()
            # NOTE(review): the scaler is fit on train+test combined, which
            # leaks test statistics into training — kept as original behavior.
            df = pd.concat([self.train_df[numerical_features], self.test_df[numerical_features]], ignore_index=True)
            scaler.fit(df[numerical_features])
            x_test = self.test_df.copy()
            x_test[numerical_features] = scaler.transform(x_test[numerical_features])
            # Keras-style multi-input layout: one array per categorical +
            # one block of numericals.
            x_test = [np.absolute(x_test[i]) for i in self.categoricals] + [x_test[numerical_features]]
        else:
            x_test = self.test_df[self.features]
        for fold, (train_idx, val_idx) in enumerate(self.cv):
            x_train, x_val = self.train_df.loc[train_idx, self.features], self.train_df.loc[val_idx, self.features]
            y_train, y_val = self.train_df.loc[train_idx, self.target], self.train_df.loc[val_idx, self.target]
            if self.scaler is not None:
                x_train[numerical_features] = scaler.transform(x_train[numerical_features])
                x_val[numerical_features] = scaler.transform(x_val[numerical_features])
                x_train = [np.absolute(x_train[i]) for i in self.categoricals] + [x_train[numerical_features]]
                x_val = [np.absolute(x_val[i]) for i in self.categoricals] + [x_val[numerical_features]]
            train_set, val_set = self.convert_dataset(x_train, y_train, x_val, y_val)
            model, importance = self.train_model(train_set, val_set)
            fi[fold, :] = importance
            conv_x_val = self.convert_x(x_val)
            y_vals[val_idx] = y_val
            oof_pred[val_idx] = model.predict(conv_x_val).reshape(oof_pred[val_idx].shape)
            x_test = self.convert_x(x_test)
            # Average test predictions over folds.
            y_pred += model.predict(x_test).reshape(y_pred.shape) / self.n_splits
            print('Partial score of fold {} is: {}'.format(fold, self.calc_metric(y_val, oof_pred[val_idx])))
        # Long-format feature-importance table plus a per-feature mean column.
        fi_df = pd.DataFrame()
        for n in np.arange(self.n_splits):
            tmp = pd.DataFrame()
            tmp["features"] = self.features
            tmp["importance"] = fi[n, :]
            tmp["fold"] = n
            fi_df = pd.concat([fi_df, tmp], ignore_index=True)
        gfi = fi_df[["features", "importance"]].groupby(["features"]).mean().reset_index()
        fi_df = fi_df.merge(gfi, on="features", how="left", suffixes=('', '_mean'))
        loss_score = self.calc_metric(self.train_df[self.target], oof_pred)
        if self.verbose:
            print('Our oof loss score is: ', loss_score)
        return y_pred, loss_score, model, oof_pred, y_vals, fi_df

    def plot_feature_importance(self, rank_range=[1, 50]):
        """Bar-plot mean feature importance for the given rank window."""
        fig, ax = plt.subplots(1, 1, figsize=(10, 20))
        sorted_df = self.fi_df.sort_values(by="importance_mean", ascending=False).reset_index().iloc[self.n_splits * (rank_range[0] - 1): self.n_splits * rank_range[1]]
        sns.barplot(data=sorted_df, x="importance", y="features", orient='h')
        ax.set_xlabel("feature importance")
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        return sorted_df
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
# External per-location metadata tables (population figures used for
# normalising case counts).
country_metadata = pd.read_excel('/kaggle/input/external-data/countries.xlsx')
state_metadata = pd.read_excel('/kaggle/input/external-data/states.xlsx')
class LgbModel(BaseModel):
    # LightGBM implementation of the BaseModel harness, with an optional
    # Optuna hyper-parameter search when parameter_tuning=True.

    def train_model(self, train_set, val_set):
        # Log every 100 rounds when verbose, otherwise stay silent.
        verbosity = 100 if self.verbose else 0
        model = lgb.train(self.params, train_set,
                          num_boost_round = 5000,
                          valid_sets=[train_set, val_set],
                          verbose_eval=verbosity)
        # Gain-based importances feed BaseModel's fi table.
        fi = model.feature_importance(importance_type="gain")
        return model, fi

    def convert_dataset(self, x_train, y_train, x_val, y_val):
        # Wrap frames into lgb.Dataset, declaring categorical columns.
        train_set = lgb.Dataset(x_train, y_train, categorical_feature=self.categoricals)
        val_set = lgb.Dataset(x_val, y_val, categorical_feature=self.categoricals)
        return train_set, val_set

    def get_params(self):
        # Hand-tuned defaults; objective/metric switch on the task.
        params = {'num_leaves': 127,
                  'min_data_in_leaf': 50,
                  'max_depth': -1,
                  'learning_rate': 0.005,
                  "boosting_type": "gbdt",
                  "bagging_seed": 11,
                  "verbosity": -1,
                  'random_state': 42,
                  }
        if self.task == "regression":
            params["objective"] = "regression"
            params["metric"] = "rmse"
        elif self.task == "classification":
            params["objective"] = "binary"
            params["metric"] = "binary_logloss"
        if self.parameter_tuning == True:
            # Optuna search on a 70/30 holdout; the returned params are the
            # best trial's, re-used with a smaller final learning rate.
            def objective(trial):
                train_x, test_x, train_y, test_y = train_test_split(self.train_df[self.features],
                                                                    self.train_df[self.target],
                                                                    test_size=0.3, random_state=42)
                dtrain = lgb.Dataset(train_x, train_y, categorical_feature=self.categoricals)
                dtest = lgb.Dataset(test_x, test_y, categorical_feature=self.categoricals)
                hyperparams = {'num_leaves': trial.suggest_int('num_leaves', 24, 1024),
                               'boosting_type': 'gbdt',
                               'objective': params["objective"],
                               'metric': params["metric"],
                               'max_depth': trial.suggest_int('max_depth', 4, 16),
                               'min_child_weight': trial.suggest_int('min_child_weight', 1, 20),
                               'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0),
                               'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0),
                               'bagging_freq': trial.suggest_int('bagging_freq', 1, 7),
                               'min_child_samples': trial.suggest_int('min_child_samples', 5, 100),
                               'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0),
                               'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0),
                               'early_stopping_rounds': 100
                               }
                model = lgb.train(hyperparams, dtrain, valid_sets=dtest, verbose_eval=500)
                pred = model.predict(test_x)
                # Same metric convention as BaseModel.calc_metric.
                if self.task == "classification":
                    return log_loss(test_y, pred)
                elif self.task == "regression":
                    return np.sqrt(mean_squared_error(test_y, pred))
            study = optuna.create_study(direction='minimize')
            study.optimize(objective, n_trials=50)
            print('Number of finished trials: {}'.format(len(study.trials)))
            print('Best trial:')
            trial = study.best_trial
            print(' Value: {}'.format(trial.value))
            print(' Params: ')
            for key, value in trial.params.items():
                print(' {}: {}'.format(key, value))
            params = trial.params
            params["learning_rate"] = 0.001
            plot_optimization_history(study)
        return params
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
def data_preparation(training_data, countries, states, country_metadata, state_metadata,
                     n_days_case, n_days_fatal, min_num_cases = 2):
    # Build per-location fixed-length series: confirmed cases are
    # normalised by population; fatalities are kept as raw counts.
    # Locations with fewer than n_days_case/n_days_fatal qualifying days
    # are dropped, so the returned frames are rectangular.
    training_data_trun = training_data[training_data['ConfirmedCases'] >= min_num_cases]
    conf_cases_dict = {}
    fatal_dict = {}
    for country in countries:
        # countries_main (module-level) are modelled per-state instead.
        if country not in countries_main:
            # Country-level rows are those with no Province_State.
            training_data_trun_loc = training_data_trun[(training_data_trun['Country_Region'] == country) & (pd.isnull(training_data_trun.Province_State))]
            training_data_trun_loc = training_data_trun_loc.groupby(['Date']).sum()
            training_data_trun_loc = training_data_trun_loc.sort_values(by = 'Date')
            if len(training_data_trun_loc['ConfirmedCases'].values) >= n_days_case:
                conf_cases_dict[country] = training_data_trun_loc['ConfirmedCases'].values[:n_days_case] / country_metadata[country_metadata['Countries'] == country]['Population'].values[0]
            if len(training_data_trun_loc['Fatalities'].values) >= n_days_fatal:
                fatal_dict[country] = training_data_trun_loc['Fatalities'].values[:n_days_fatal]
    for state in states:
        training_data_trun_loc = training_data_trun[training_data_trun['Province_State'] == state]
        training_data_trun_loc = training_data_trun_loc.groupby(['Date']).sum()
        training_data_trun_loc = training_data_trun_loc.sort_values(by = 'Date')
        if len(training_data_trun_loc['ConfirmedCases'].values) >= n_days_case:
            conf_cases_dict[state] = training_data_trun_loc['ConfirmedCases'].values[:n_days_case] / state_metadata[state_metadata['States'] == state]['Population'].values[0]
        if len(training_data_trun_loc['Fatalities'].values) >= n_days_fatal:
            fatal_dict[state] = training_data_trun_loc['Fatalities'].values[:n_days_fatal]
    # Columns are locations; rows are days since the min_num_cases threshold.
    return pd.DataFrame(conf_cases_dict), pd.DataFrame(fatal_dict)
class CatbModel(BaseModel):
    """CatBoost implementation of the BaseModel harness.

    Uses RMSE as the loss for regression tasks and Logloss for
    classification tasks.
    """

    def train_model(self, train_set, val_set):
        # Print progress every 100 iterations when verbose, else silent.
        log_every = 100 if self.verbose else 0
        if self.task == "regression":
            booster = CatBoostRegressor(**self.params)
        elif self.task == "classification":
            booster = CatBoostClassifier(**self.params)
        booster.fit(train_set['X'], train_set['y'],
                    eval_set=(val_set['X'], val_set['y']),
                    verbose=log_every,
                    cat_features=self.categoricals)
        return booster, booster.get_feature_importance()

    def convert_dataset(self, x_train, y_train, x_val, y_val):
        # CatBoost consumes plain {'X': ..., 'y': ...} dicts here.
        return {'X': x_train, 'y': y_train}, {'X': x_val, 'y': y_val}

    def get_params(self):
        params = {
            'task_type': "CPU",
            'learning_rate': 0.01,
            'iterations': 1000,
            'random_seed': 42,
            'use_best_model': True
        }
        if self.task == "regression":
            params["loss_function"] = "RMSE"
        elif self.task == "classification":
            params["loss_function"] = "Logloss"
        return params
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
def fts_training(input_df, rank = 3):
    """Truncated-SVD factorisation of a wide-format time-series frame.

    Returns (scores, pcs) where scores = U_r @ diag(S_r) and pcs = V_r,
    so scores @ pcs is the rank-`rank` approximation of input_df.values.
    """
    u, s, vt = np.linalg.svd(input_df.values, full_matrices=False)
    # Broadcasting multiply is equivalent to U_r @ diag(S_r).
    scores = u[:, :rank] * s[:rank]
    pcs = vt[:rank, :]
    return scores, pcs
class Mish(Layer):
    # Keras layer for the Mish activation: x * tanh(softplus(x)).
    def __init__(self, **kwargs):
        super(Mish, self).__init__(**kwargs)

    def build(self, input_shape):
        super(Mish, self).build(input_shape)

    def call(self, x):
        return x * K.tanh(K.softplus(x))

    def compute_output_shape(self, input_shape):
        # Element-wise activation: shape is unchanged.
        return input_shape


class LayerNormalization(keras.layers.Layer):
    # Layer normalisation over the last axis with optional learnable
    # gain (gamma) and bias (beta).
    def __init__(self,
                 center=True,
                 scale=True,
                 epsilon=None,
                 gamma_initializer='ones',
                 beta_initializer='zeros',
                 gamma_regularizer=None,
                 beta_regularizer=None,
                 gamma_constraint=None,
                 beta_constraint=None,
                 **kwargs):
        super(LayerNormalization, self).__init__(**kwargs)
        self.supports_masking = True
        self.center = center
        self.scale = scale
        if epsilon is None:
            # Tiny default to avoid division by zero in call().
            epsilon = K.epsilon() * K.epsilon()
        self.epsilon = epsilon
        self.gamma_initializer = keras.initializers.get(gamma_initializer)
        self.beta_initializer = keras.initializers.get(beta_initializer)
        self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
        self.beta_regularizer = keras.regularizers.get(beta_regularizer)
        self.gamma_constraint = keras.constraints.get(gamma_constraint)
        self.beta_constraint = keras.constraints.get(beta_constraint)
        self.gamma, self.beta = None, None

    def get_config(self):
        # Serialise constructor arguments so the layer can be re-created.
        config = {
            'center': self.center,
            'scale': self.scale,
            'epsilon': self.epsilon,
            'gamma_initializer': keras.initializers.serialize(self.gamma_initializer),
            'beta_initializer': keras.initializers.serialize(self.beta_initializer),
            'gamma_regularizer': keras.regularizers.serialize(self.gamma_regularizer),
            'beta_regularizer': keras.regularizers.serialize(self.beta_regularizer),
            'gamma_constraint': keras.constraints.serialize(self.gamma_constraint),
            'beta_constraint': keras.constraints.serialize(self.beta_constraint),
        }
        base_config = super(LayerNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape

    def compute_mask(self, inputs, input_mask=None):
        # Pass masks through unchanged.
        return input_mask

    def build(self, input_shape):
        # Weights span only the last (feature) axis.
        shape = input_shape[-1:]
        if self.scale:
            self.gamma = self.add_weight(
                shape=shape,
                initializer=self.gamma_initializer,
                regularizer=self.gamma_regularizer,
                constraint=self.gamma_constraint,
                name='gamma',
            )
        if self.center:
            self.beta = self.add_weight(
                shape=shape,
                initializer=self.beta_initializer,
                regularizer=self.beta_regularizer,
                constraint=self.beta_constraint,
                name='beta',
            )
        super(LayerNormalization, self).build(input_shape)

    def call(self, inputs, training=None):
        # Normalise to zero mean / unit std over the last axis, then apply
        # the learned affine transform.
        mean = K.mean(inputs, axis=-1, keepdims=True)
        variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True)
        std = K.sqrt(variance + self.epsilon)
        outputs = (inputs - mean) / std
        if self.scale:
            outputs *= self.gamma
        if self.center:
            outputs += self.beta
        return outputs
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
def forecast_trajectories(training_data, countries, states, country_metadata, state_metadata,
                          loc = None, n_days_case = 30, n_days_fatal = 10, forecast_days = 10,
                          min_num_cases = 2, model_type = 'ARIMA', components_modeling = True, rank = 3):
    # Forecast confirmed-case and fatality trajectories.
    # When components_modeling=True, an SVD factorisation (fts_training)
    # reduces all locations to `rank` component series; each component is
    # forecast with ARIMA and the components are recombined for every
    # location.  Otherwise only the single location `loc` is forecast.
    # Kalman-smoothed longer-history components serve as exogenous inputs.
    # NOTE(review): model_type is accepted but unused; n_days_fatal is
    # overridden by n_days_case in the data_preparation calls below.
    kf = KalmanFilter(initial_state_mean = np.zeros(rank).tolist(), n_dim_obs = rank)
    conf_cases_df, fatal_df = data_preparation(training_data, countries, states, country_metadata, state_metadata,
                                               n_days_case = n_days_case, n_days_fatal = n_days_case,
                                               min_num_cases = min_num_cases)
    pred_countries = conf_cases_df.columns.tolist()
    pred_countries_fatal = fatal_df.columns.tolist()
    # Longer (history + horizon) series used as exogenous regressors.
    conf_cases_exog_df, fatal_exog_df = data_preparation(training_data, countries, states, country_metadata, state_metadata,
                                                         n_days_case = n_days_case + forecast_days,
                                                         n_days_fatal = n_days_case + forecast_days,
                                                         min_num_cases = min_num_cases)
    scores_exog, pcs_exog = fts_training(conf_cases_exog_df, rank = rank)
    if len(scores_exog) > 0:
        # Kalman-smooth the exogenous component series.
        scores_exog = kf.em(scores_exog).smooth(scores_exog)[0]
    scores, pcs = fts_training(conf_cases_df, rank = rank)
    forecasted_scores = []
    idx = 0
    for score in scores.T:
        if components_modeling:
            exog = scores_exog[:n_days_case, idx] if len(scores_exog) > 0 else None
            pred_exog = scores_exog[n_days_case:, idx] if len(scores_exog) > 0 else None
            y = score
        else:
            exog = scores_exog[:n_days_case, :] if len(scores_exog) > 0 else None
            pred_exog = scores_exog[n_days_case:, :] if len(scores_exog) > 0 else None
            y = conf_cases_df[loc].values
        # Try progressively simpler ARIMA orders until one fits.
        # NOTE(review): 'setpwise_fit' looks like a typo for 'stepwise';
        # left untouched since changing fit kwargs may alter behaviour.
        try:
            model = arima.ARIMA(endog = y, exog = exog, order =(4, 1, 0)).fit(
                seasonal = False, trace = False, method = 'css', solver = 'bfgs',
                error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False)
        except:
            try:
                model = arima.ARIMA(endog = y, exog = exog, order =(3, 1, 0)).fit(
                    seasonal = False, trace = False, method = 'css', solver = 'bfgs',
                    error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False)
            except:
                try:
                    model = arima.ARIMA(endog = y, exog = exog, order =(2, 1, 0)).fit(
                        seasonal = False, trace = False, method = 'css', solver = 'bfgs',
                        error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False)
                except:
                    model = arima.ARIMA(endog = y, exog = exog, order =(1, 0, 0)).fit(
                        seasonal = False, trace = False, method = 'css', solver = 'bfgs',
                        error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False)
        if not components_modeling:
            # Single-location mode: one forecast is enough.
            pred_traj = model.forecast(steps = forecast_days, alpha = 0.001, exog = pred_exog)[0]
            break
        else:
            forecasted_scores.append(model.forecast(steps = forecast_days, alpha = 0.001, exog = pred_exog)[0].tolist())
        idx = idx + 1
    if components_modeling:
        # Recombine the forecast components and undo the per-capita scaling.
        pred_traj = np.matmul(np.array(forecasted_scores).T, pcs)
        pred_traj_df = pd.DataFrame(pred_traj, columns = pred_countries)
        for loc in pred_countries:
            if loc in country_metadata['Countries'].values.tolist():
                pred_traj_df[loc] = country_metadata[country_metadata['Countries'] == loc]['Population'].values[0] * pred_traj_df[loc]
            if loc in state_metadata['States'].values.tolist():
                pred_traj_df[loc] = state_metadata[state_metadata['States'] == loc]['Population'].values[0] * pred_traj_df[loc]
    else:
        pred_traj_df = pd.DataFrame()
        if loc in countries:
            pred_traj_df[loc] = country_metadata[country_metadata['Countries'] == loc]['Population'].values[0] * pred_traj
        elif loc in states:
            pred_traj_df[loc] = state_metadata[state_metadata['States'] == loc]['Population'].values[0] * pred_traj
    # Fatalities: same pipeline (series are raw counts, not per-capita).
    fatal_scores_exog, fatal_pcs_exog = fts_training(fatal_exog_df, rank = rank)
    if len(fatal_pcs_exog) > 0:
        fatal_scores_exog = kf.em(fatal_scores_exog).smooth(fatal_scores_exog)[0]
    fatal_scores, fatal_pcs = fts_training(fatal_df, rank = rank)
    forecasted_fatal_scores = []
    idx = 0
    for fatal_score in fatal_scores.T:
        if components_modeling:
            exog = fatal_scores_exog[:n_days_fatal, idx] if len(fatal_scores_exog) > 0 else None
            pred_exog = fatal_scores_exog[n_days_fatal:, idx] if len(fatal_scores_exog) > 0 else None
            y = fatal_score
        else:
            exog = fatal_scores_exog[:n_days_fatal, :] if len(fatal_scores_exog) > 0 else None
            pred_exog = fatal_scores_exog[n_days_fatal:, :] if len(fatal_scores_exog) > 0 else None
            y = fatal_df[loc].values
        try:
            model = arima.ARIMA(endog = y, exog = exog, order =(4, 1, 0)).fit(
                seasonal = False, trace = False, method = 'css', solver = 'bfgs',
                error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False)
        except:
            try:
                model = arima.ARIMA(endog = y, exog = exog, order =(3, 1, 0)).fit(
                    seasonal = False, trace = False, method = 'css', solver = 'bfgs',
                    error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False)
            except:
                try:
                    model = arima.ARIMA(endog = y, exog = exog, order =(2, 1, 0)).fit(
                        seasonal = False, trace = False, method = 'css', solver = 'bfgs',
                        error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False)
                except:
                    model = arima.ARIMA(endog = y, exog = exog, order =(1, 0, 0)).fit(
                        seasonal = False, trace = False, method = 'css', solver = 'bfgs',
                        error_action = 'ignore', setpwise_fit = True, warn_convergence = True, disp = False)
        if not components_modeling:
            fatal_pred_traj = model.forecast(steps = forecast_days, alpha = 0.001, exog = pred_exog)[0]
            break
        else:
            forecasted_fatal_scores.append(model.forecast(steps = forecast_days, alpha = 0.001, exog = pred_exog)[0].tolist())
        idx = idx + 1
    if components_modeling:
        fatal_pred_traj = np.matmul(np.array(forecasted_fatal_scores).T, fatal_pcs)
        fatal_pred_traj_df = pd.DataFrame(fatal_pred_traj, columns = pred_countries_fatal)
    else:
        fatal_pred_traj_df = pd.DataFrame()
        # NOTE(review): fatalities are not per-capita, yet this branch
        # multiplies by population — looks inconsistent; left as original.
        if loc in countries:
            fatal_pred_traj_df[loc] = country_metadata[country_metadata['Countries'] == loc]['Population'].values[0] * fatal_pred_traj
        elif loc in states:
            fatal_pred_traj_df[loc] = state_metadata[state_metadata['States'] == loc]['Population'].values[0] * fatal_pred_traj
    return pred_traj_df, fatal_pred_traj_df
class NeuralNetworkModel(BaseModel):
    # Keras entity-embedding MLP implementation of the BaseModel harness:
    # one embedding input per categorical feature plus one dense block of
    # numerical features, followed by Mish/Dropout/LayerNorm hidden layers.

    def train_model(self, train_set, val_set):
        inputs = []
        embeddings = []
        embedding_out_dim = self.params['embedding_out_dim']
        n_neuron = self.params['hidden_units']
        # One embedding branch per categorical column; vocabulary size is
        # derived from the column's maximum absolute value.
        for i in self.categoricals:
            input_ = Input(shape=(1,))
            embedding = Embedding(int(np.absolute(self.train_df[i]).max() + 1), embedding_out_dim, input_length=1)(input_)
            embedding = Reshape(target_shape=(embedding_out_dim,))(embedding)
            inputs.append(input_)
            embeddings.append(embedding)
        # Single dense branch for all numerical features.
        input_numeric = Input(shape=(len(self.features) - len(self.categoricals),))
        embedding_numeric = Dense(n_neuron)(input_numeric)
        embedding_numeric = Mish()(embedding_numeric)
        inputs.append(input_numeric)
        embeddings.append(embedding_numeric)
        x = Concatenate()(embeddings)
        # Hidden stack with halving widths.
        for i in np.arange(self.params['hidden_layers'] - 1):
            x = Dense(n_neuron // (2 * (i + 1)))(x)
            x = Mish()(x)
            x = Dropout(self.params['hidden_dropout'])(x)
            x = LayerNormalization()(x)
        if self.task == "regression":
            out = Dense(1, activation="linear", name = "out")(x)
            loss = "mse"
        elif self.task == "classification":
            out = Dense(1, activation='sigmoid', name = 'out')(x)
            loss = "binary_crossentropy"
        model = Model(inputs=inputs, outputs=out)
        model.compile(loss=loss, optimizer=Adam(lr=1e-04, beta_1=0.9, beta_2=0.999, decay=1e-04))
        er = EarlyStopping(patience=10, min_delta=1e-4, restore_best_weights=True, monitor='val_loss')
        ReduceLR = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min')
        model.fit(train_set['X'], train_set['y'], callbacks=[er, ReduceLR],
                  epochs=self.params['epochs'], batch_size=self.params['batch_size'],
                  validation_data=[val_set['X'], val_set['y']])
        # NNs expose no native importances; return zeros for BaseModel's table.
        fi = np.zeros(len(self.features))
        return model, fi

    def convert_dataset(self, x_train, y_train, x_val, y_val):
        train_set = {'X': x_train, 'y': y_train}
        val_set = {'X': x_val, 'y': y_val}
        return train_set, val_set

    def get_params(self):
        # NOTE(review): input_dropout / hidden_activation / batch_norm /
        # optimizer entries are not read by train_model above.
        params = {
            'input_dropout': 0.0,
            'hidden_layers': 2,
            'hidden_units': 128,
            'embedding_out_dim': 4,
            'hidden_activation': 'relu',
            'hidden_dropout': 0.05,
            'batch_norm': 'before_act',
            'optimizer': {'type': 'adam', 'lr': 0.001},
            'batch_size': 128,
            'epochs': 80
        }
        return params
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
# Forecast 33 days of New York case/fatality trajectories from a
# 55-day fitting window.
pred_traj_df, fatal_pred_traj_df = forecast_trajectories(
    training_data, countries, states, country_metadata, state_metadata,
    loc='New York', n_days_case=55, n_days_fatal=55, forecast_days=33,
    rank=3, min_num_cases=2, components_modeling=True)
# Load every stage-1 NCAAW data file into a dict keyed by file stem.
data_dict = {}
for path in glob.glob('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/*'):
    name = path.split('/')[-1].split('.')[0]
    # WTeamSpellings is not UTF-8 encoded.
    read_kwargs = {'encoding': 'cp1252'} if name == 'WTeamSpellings' else {}
    data_dict[name] = pd.read_csv(path, **read_kwargs)
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
def generate_prediction(test_data, training_data, countries_main, countries, states, min_num_cases):
    # Fill the ConfirmedCases / Fatalities columns of test_data in place:
    # days overlapping the training window are copied from observed data;
    # later days come from forecast_trajectories, made monotone with
    # np.maximum.accumulate (cumulative counts cannot decrease).
    # Locations in excl_list (module-level) or lacking enough data are
    # skipped and handled later by fill_excl_pred.
    for country in countries:
        print(country)
        if country not in countries_main and country not in excl_list:
            # Country-level rows are those with no Province_State.
            test_loc_df = test_data[(test_data['Country_Region'] == country) & (pd.isnull(test_data.Province_State))].reset_index()
            train_loc_df = training_data[(training_data['Country_Region'] == country) & (pd.isnull(training_data.Province_State))].reset_index()
            test_start = test_loc_df['Date'][0]
            test_end = test_loc_df['Date'][len(test_loc_df) - 1]
            train_end = train_loc_df['Date'][len(train_loc_df) - 1]
            # Copy the observed overlap between test and train windows.
            test_loc_df.loc[((test_loc_df['Date'] >= test_start)) & (test_loc_df['Date'] <= train_end), 'ConfirmedCases'] = train_loc_df[(train_loc_df['Date'] >= test_start) & (train_loc_df['Date'] <= train_end)]['ConfirmedCases'].values
            test_loc_df.loc[((test_loc_df['Date'] >= test_start)) & (test_loc_df['Date'] <= train_end), 'Fatalities'] = train_loc_df[(train_loc_df['Date'] >= test_start) & (train_loc_df['Date'] <= train_end)]['Fatalities'].values
            effective_df = train_loc_df[train_loc_df['ConfirmedCases'] >= min_num_cases]
            forecast_days = int((test_end - train_end).days)
            if len(effective_df) > 0:
                # Window length = days since the outbreak threshold.
                effective_train_start = train_loc_df[train_loc_df['ConfirmedCases'] >= min_num_cases].reset_index()['Date'][0]
                n_days_case = int((train_end - effective_train_start).days) + 1
                pred_df, fatal_pred_df = forecast_trajectories(training_data, countries, states, country_metadata, state_metadata,
                                                               loc = country, n_days_case = n_days_case, n_days_fatal = n_days_case,
                                                               forecast_days = forecast_days, min_num_cases = min_num_cases, rank = 3)
                test_loc_df.loc[test_loc_df['Date'] > train_end, 'ConfirmedCases'] = np.maximum.accumulate(pred_df[country].values).astype(int)
                test_data.loc[(test_data['Country_Region'] == country) & (pd.isnull(test_data.Province_State)), 'ConfirmedCases'] = test_loc_df['ConfirmedCases'].values
                test_loc_df.loc[test_loc_df['Date'] > train_end, 'Fatalities'] = np.maximum.accumulate(fatal_pred_df[country].values).astype(int)
                test_data.loc[(test_data['Country_Region'] == country) & (pd.isnull(test_data.Province_State)), 'Fatalities'] = test_loc_df['Fatalities'].values
    for state in states:
        print(state)
        # str() guards against NaN province entries matching 'nan' in excl_list.
        if str(state) not in excl_list:
            test_loc_df = test_data[(test_data['Province_State'] == state)].reset_index()
            train_loc_df = training_data[(training_data['Province_State'] == state)].reset_index()
            test_start = test_loc_df['Date'][0]
            test_end = test_loc_df['Date'][len(test_loc_df) - 1]
            train_end = train_loc_df['Date'][len(train_loc_df) - 1]
            test_loc_df.loc[((test_loc_df['Date'] >= test_start)) & (test_loc_df['Date'] <= train_end), 'ConfirmedCases'] = train_loc_df[(train_loc_df['Date'] >= test_start) & (train_loc_df['Date'] <= train_end)]['ConfirmedCases'].values
            test_loc_df.loc[((test_loc_df['Date'] >= test_start)) & (test_loc_df['Date'] <= train_end), 'Fatalities'] = train_loc_df[(train_loc_df['Date'] >= test_start) & (train_loc_df['Date'] <= train_end)]['Fatalities'].values
            effective_df = train_loc_df[train_loc_df['ConfirmedCases'] >= min_num_cases]
            forecast_days = int((test_end - train_end).days)
            if len(effective_df) > 0:
                effective_train_start = train_loc_df[train_loc_df['ConfirmedCases'] >= min_num_cases].reset_index()['Date'][0]
                n_days_case = int((train_end - effective_train_start).days) + 1
                pred_df, fatal_pred_df = forecast_trajectories(training_data, countries, states, country_metadata, state_metadata,
                                                               loc = state, n_days_case = n_days_case, n_days_fatal = n_days_case,
                                                               forecast_days = forecast_days, min_num_cases = min_num_cases, rank = 3)
                test_loc_df.loc[test_loc_df['Date'] > train_end, 'ConfirmedCases'] = np.maximum.accumulate(pred_df[state].values).astype(int)
                test_data.loc[test_data['Province_State'] == state, 'ConfirmedCases'] = test_loc_df['ConfirmedCases'].values
                test_loc_df.loc[test_loc_df['Date'] > train_end, 'Fatalities'] = np.maximum.accumulate(fatal_pred_df[state].values).astype(int)
                test_data.loc[test_data['Province_State'] == state, 'Fatalities'] = test_loc_df['Fatalities'].values
    return test_data
# Convert seed strings such as 'W01' into their numeric rank (1-16).
data_dict['WNCAATourneySeeds']['Seed'] = data_dict['WNCAATourneySeeds']['Seed'].apply(lambda x: int(x[1:3]))
# Fix: the original displayed data_dict[fname], but `fname` is undefined
# in this cell (NameError); reference the frame explicitly.
data_dict['WNCAATourneySeeds'].head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
# Forecasting test frame; Date parsed so it compares against train dates.
test_data = pd.read_csv('/kaggle/input/inputs/test.csv')
test_data = test_data.assign(Date=pd.to_datetime(test_data['Date']))
# Load the NCAAW stage-1 sample submission as the test skeleton.
# Fix: the original path contained a stray space ('.. /input/...'),
# which raises FileNotFoundError on read.
test = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv')
print(test.shape)
test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
# Placeholder prediction columns, filled in by generate_prediction().
for col in ('ConfirmedCases', 'Fatalities'):
    test_data[col] = None
# Split the 'Season_WTeamID_LTeamID' ID string into three int columns.
test = test.drop(['Pred'], axis=1)
for pos, col in enumerate(['Season', 'WTeamID', 'LTeamID']):
    test[col] = test['ID'].apply(lambda x, p=pos: int(x.split('_')[p]))
test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
# Locations excluded from the ARIMA forecasting pipeline (degenerate or
# too-sparse case curves); generate_prediction skips these and
# fill_excl_pred handles them with a linear-regression fallback instead.
excl_list = ['Hubei', 'Belize', 'Chad', 'Eritrea', 'Grenada', 'Guinea-Bissau', 'Holy See',
             'Laos', 'Libya', 'Mali', 'Mozambique', 'Saint Kitts and Nevis', 'Somalia', 'Syria',
             'nan', 'Saint Barthelemy', 'Virgin Islands', 'Montserrat', 'Diamond Princess']
# Enrich the training results frame with city, season, and team-name
# metadata via successive left merges; columns.difference avoids
# duplicating columns already present in train.
gameCities = pd.merge(data_dict['WGameCities'], data_dict['Cities'], how='left', on=['CityID'])
cols_to_use = gameCities.columns.difference(train.columns).tolist() + ["Season", "WTeamID", "LTeamID"]
train = train.merge(gameCities[cols_to_use], how="left", on=["Season", "WTeamID", "LTeamID"])
train.head()
cols_to_use = data_dict["WSeasons"].columns.difference(train.columns).tolist() + ["Season"]
train = train.merge(data_dict["WSeasons"][cols_to_use], how="left", on=["Season"])
train.head()
# Team names joined twice: once for the winner, once for the loser
# (suffixes _W / _L); the redundant TeamID key is dropped each time.
cols_to_use = data_dict["WTeams"].columns.difference(train.columns).tolist()
train = train.merge(data_dict["WTeams"][cols_to_use], how="left", left_on=["WTeamID"], right_on=["TeamID"])
train.drop(['TeamID'], axis=1, inplace=True)
train = train.merge(data_dict["WTeams"][cols_to_use], how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L'))
train.drop(['TeamID'], axis=1, inplace=True)
print(train.shape)
train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
# Fill the test frame with forecasts for every country and state.
predictions = generate_prediction(
    test_data, training_data, countries_main, countries, states,
    min_num_cases=2)
# Attach tournament seeds for the winning and losing team (suffixes
# _W / _L); drop_duplicates guards against one-to-many seed rows.
cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(train.columns).tolist() + ['Season']
train = train.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]),
                    how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'])
train.drop(['TeamID'], axis=1, inplace=True)
train = train.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]),
                    how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L'))
train.drop(['TeamID'], axis=1, inplace=True)
print(train.shape)
train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
# Locations the forecaster left unfilled; these fall back to the
# linear-regression extrapolation in fill_excl_pred().
no_cases = predictions.ConfirmedCases.isnull()
country_level = pd.isnull(predictions.Province_State)
missing_countries = predictions[no_cases & country_level]['Country_Region'].unique().tolist()
missing_states = predictions[no_cases & ~country_level]['Province_State'].unique().tolist()
# Mirror the training-frame enrichment on the test frame: city, season,
# team-name, and seed metadata via successive left merges.
cols_to_use = gameCities.columns.difference(test.columns).tolist() + ["Season", "WTeamID", "LTeamID"]
test = test.merge(gameCities[cols_to_use].drop_duplicates(subset=["Season", "WTeamID", "LTeamID"]),
                  how="left", on=["Season", "WTeamID", "LTeamID"])
del gameCities
gc.collect()
test.head()
cols_to_use = data_dict["WSeasons"].columns.difference(test.columns).tolist() + ["Season"]
test = test.merge(data_dict["WSeasons"][cols_to_use].drop_duplicates(subset=["Season"]), how="left", on=["Season"])
test.head()
# Team names: one join for the winner, one for the loser (_W / _L).
cols_to_use = data_dict["WTeams"].columns.difference(test.columns).tolist()
test = test.merge(data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]),
                  how="left", left_on=["WTeamID"], right_on=["TeamID"])
test.drop(['TeamID'], axis=1, inplace=True)
test = test.merge(data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]),
                  how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L'))
test.drop(['TeamID'], axis=1, inplace=True)
test.head()
# Tournament seeds for both teams.
cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(test.columns).tolist() + ['Season']
test = test.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]),
                  how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'])
test.drop(['TeamID'], axis=1, inplace=True)
test = test.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season", "TeamID"]),
                  how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L'))
test.drop(['TeamID'], axis=1, inplace=True)
print(test.shape)
test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
def fill_excl_pred(predictions, training_data_original, test_data,
                   horizon_start=10, horizon_end=40):
    """Back-fill forecasts for locations the main model skipped.

    For every country in the module-level ``missing_countries`` list and
    every state in ``missing_states``, the prediction window is filled
    with the ground-truth values that overlap the test period, followed
    by a linear-regression extrapolation fitted on that overlap.

    Parameters
    ----------
    predictions : pandas.DataFrame
        Frame with ``ConfirmedCases`` / ``Fatalities`` columns to fill;
        mutated in place and also returned.
    training_data_original : pandas.DataFrame
        Full training data (ground truth) with ``Date`` column.
    test_data : pandas.DataFrame
        Test frame; only each location's first ``Date`` is used.
    horizon_start, horizon_end : int
        Time-index range projected beyond the observed overlap.  The
        defaults reproduce the original hard-coded ``np.arange(10, 40)``.

    Returns
    -------
    pandas.DataFrame
        The same ``predictions`` object, filled in.
    """

    def _extrapolate(series):
        # Fit y = a*t + b on the observed overlap, then project the
        # remaining horizon.
        regressor = LinearRegression()
        regressor.fit(np.arange(len(series)).reshape(-1, 1), series)
        return regressor.predict(
            np.arange(horizon_start, horizon_end).reshape(-1, 1))

    def _fill(loc_col, location, require_history):
        print(location)
        test_start = test_data[test_data[loc_col] == location]['Date'].values[0]
        in_location = training_data_original[loc_col] == location
        overlap = training_data_original[
            in_location & (training_data_original['Date'] >= test_start)]
        cases_obs = overlap['ConfirmedCases']
        fatal_obs = overlap['Fatalities']
        # The original country loop skipped extrapolation when the
        # location had no training rows at all; preserved here.
        if require_history and len(training_data_original[in_location]) == 0:
            cases_pred, fatal_pred = [], []
        else:
            cases_pred = _extrapolate(cases_obs)
            fatal_pred = _extrapolate(fatal_obs)
        target = predictions[loc_col] == location
        predictions.loc[target, 'ConfirmedCases'] = np.concatenate(
            (cases_obs, cases_pred), axis=0).astype(int)
        predictions.loc[target, 'Fatalities'] = np.concatenate(
            (fatal_obs, fatal_pred), axis=0).astype(int)

    for country in missing_countries:
        _fill('Country_Region', country, require_history=True)
    for state in missing_states:
        # FIX: the state branch originally read series_fatal_test from
        # the unrelated global `training_data` instead of
        # training_data_original.
        _fill('Province_State', state, require_history=False)
    return predictions
# Drop train-only columns so train and test share the same schema.
test_columns = set(test.columns.values.tolist())
not_exist_in_test = [col for col in train.columns.values.tolist()
                     if col not in test_columns]
print(not_exist_in_test)
train = train.drop(not_exist_in_test, axis=1)
train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
# Back-fill predictions for the locations the main model skipped.
prediction_final = fill_excl_pred(predictions, training_data_original, test_data )
# Per (Season, team) aggregates — sum / game count / variance of points
# scored in wins and, separately, in losses.  Multi-level agg columns
# are flattened to single strings like "WScore sum".
def _season_score_stats(frame, team_col, score_col):
    stats = frame.groupby(['Season', team_col]).agg(
        {score_col: ['sum', 'count', 'var']}).reset_index()
    stats.columns = [' '.join(col).strip() for col in stats.columns.values]
    return stats

team_win_score = _season_score_stats(regularSeason, 'WTeamID', 'WScore')
team_loss_score = _season_score_stats(regularSeason, 'LTeamID', 'LScore')
del regularSeason
gc.collect()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
# Assemble the submission frame with the required columns and dtypes.
predictions_csv = pd.DataFrame({
    'ForecastId': prediction_final['ForecastId'].astype(int),
    'ConfirmedCases': prediction_final['ConfirmedCases'].astype(float),
    'Fatalities': prediction_final['Fatalities'].astype(float),
})
# Attach season aggregates for both sides of each game.  The first two
# merges add the winner's win stats and the loser's loss stats; the last
# two add the winner's loss stats and the loser's win stats.  Those
# later merges duplicate the key columns, which pandas suffixes with
# _x/_y — the redundant _y keys are dropped afterwards.
train = (train
         .merge(team_win_score, how='left', on=['Season', 'WTeamID'])
         .merge(team_loss_score, how='left', on=['Season', 'LTeamID']))
train = train.merge(team_loss_score, how='left',
                    left_on=['Season', 'WTeamID'],
                    right_on=['Season', 'LTeamID'])
train = train.merge(team_win_score, how='left',
                    left_on=['Season', 'LTeamID_x'],
                    right_on=['Season', 'WTeamID'])
train.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
# Write the Kaggle submission file (no row-index column).
predictions_csv.to_csv('submission.csv', index = False )
# Same four-way merge as for train: win/loss season aggregates for both
# teams, then drop the redundant suffixed key columns.
test = (test
        .merge(team_win_score, how='left', on=['Season', 'WTeamID'])
        .merge(team_loss_score, how='left', on=['Season', 'LTeamID']))
test = test.merge(team_loss_score, how='left',
                  left_on=['Season', 'WTeamID'],
                  right_on=['Season', 'LTeamID'])
test = test.merge(team_win_score, how='left',
                  left_on=['Season', 'LTeamID_x'],
                  right_on=['Season', 'WTeamID'])
test.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
# Show up to 1000 rows when displaying frames.
# FIX: the abbreviated key 'max_rows' is ambiguous in modern pandas
# (it also matches 'styler.render.max_rows' and raises OptionError);
# use the fully qualified option name.
pd.set_option('display.max_rows', 1000)
def preprocess(df):
    """Combine win-side and loss-side season aggregates into features.

    Per the merge order used to build the frame, the *_x suffixed
    aggregate columns describe team 1 (joined on WTeamID) and *_y the
    other side; each feature sums a team's win-side and loss-side
    aggregate so it covers all of that team's games.
    """
    # Total points scored over the season, per team.
    df['x_score'] = df['WScore sum_x'] + df['LScore sum_y']
    df['y_score'] = df['WScore sum_y'] + df['LScore sum_x']
    # Games played, per team.
    df['x_count'] = df['WScore count_x'] + df['LScore count_y']
    # FIX: originally added 'WScore count_x' (team 1's win count) here,
    # breaking the x/y symmetry established by x_score/y_score.
    df['y_count'] = df['WScore count_y'] + df['LScore count_x']
    # Score variance, per team.
    # FIX: originally added 'LScore count_y' (a game COUNT) instead of
    # the loss-score variance.
    df['x_var'] = df['WScore var_x'] + df['LScore var_y']
    # FIX: originally added 'WScore var_x' (team 1's stat) instead of
    # the other side's loss-score variance.
    df['y_var'] = df['WScore var_y'] + df['LScore var_x']
    return df

train = preprocess(train)
test = preprocess(test)
Google Cloud & NCAA® ML Competition 2020-NCAAW