Dataset columns (name: type, min to max):
kernel_id: int64, values 24.2k to 23.3M
prompt: string, lengths 8 to 1.85M
completion: string, lengths 1 to 182k
comp_name: string, lengths 5 to 57
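The schema above describes a prompt/completion corpus of Kaggle notebook cells; each record that follows is one row (kernel_id, prompt, completion, comp_name), with cell-type tags such as <feature_engineering> that appear to label the following cell's operation. A minimal loading sketch; the parquet file name and format are assumptions for illustration, not something the dump states:

import pandas as pd

df = pd.read_parquet('kernels.parquet')  # assumed file name/format; could equally be CSV or JSON lines
print(df.dtypes)  # kernel_id int64; prompt, completion, comp_name strings
print(df['prompt'].str.len().agg(['min', 'max']))  # expect 8 and ~1.85M per the schema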
21,316,401
BUREAU_LOAN_TYPES = bureau[['SK_ID_CURR', 'CREDIT_TYPE']].groupby(by = ['SK_ID_CURR'])['CREDIT_TYPE'].nunique().reset_index().rename(index=str, columns={'CREDIT_TYPE': 'BUREAU_LOAN_TYPES'}) app_train_bureau = app_train_bureau.merge(BUREAU_LOAN_TYPES, on = ['SK_ID_CURR'], how = 'left' ).fillna(0) print(app_train_bureau.shape) app_train_bureau.head()<feature_engineering>
x_train = data_train[:, 1:] x_train = x_train.reshape(x_train.shape[0], 28, 28, 1) input_shape =(28, 28, 1 )
Digit Recognizer
21,316,401
app_train_bureau['AVERAGE_LOAN_TYPE'] = app_train_bureau['BUREAU_LOAN_COUNT']/app_train_bureau['BUREAU_LOAN_TYPES'] app_train_bureau = app_train_bureau.fillna(0) print(app_train_bureau.shape) app_train_bureau.head()<drop_column>
x_train = x_train / 255.0
Digit Recognizer
21,316,401
del app_train_bureau['BUREAU_LOAN_COUNT'], app_train_bureau['BUREAU_LOAN_TYPES'] app_train_bureau.head()<feature_engineering>
y_train = data_train[:, 0] y_train[:5]
Digit Recognizer
21,316,401
def f(x):
    if x == 'Closed':
        y = 0
    else:
        y = 1
    return y

bureau_fe1 = bureau
bureau_fe1['CREDIT_ACTIVE_CLOSED'] = bureau_fe1.apply(lambda x: f(x.CREDIT_ACTIVE), axis=1)
bureau_fe1.head()<merge>
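For reference, a vectorized sketch of the same flag. Note that bureau_fe1 = bureau above is an alias, so the apply also mutates bureau; the .copy() below is an alternative, not what the kernel does:

# equivalent to f(): 0 where CREDIT_ACTIVE == 'Closed', 1 otherwise
bureau_fe1 = bureau.copy()
bureau_fe1['CREDIT_ACTIVE_CLOSED'] = (bureau_fe1['CREDIT_ACTIVE'] != 'Closed').astype(int)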
y_train = utils.to_categorical(y_train )
Digit Recognizer
21,316,401
grp = bureau_fe1.groupby(by = ['SK_ID_CURR'])['CREDIT_ACTIVE_CLOSED'].mean().reset_index().rename(index=str, columns={'CREDIT_ACTIVE_CLOSED':'ACTIVE_LOANS_PERCENTAGE'}) app_train_bureau = app_train_bureau.merge(grp, on = ['SK_ID_CURR'], how = 'left') del bureau_fe1['CREDIT_ACTIVE_CLOSED'] print(bureau_fe1.shape) bureau_fe1.head()<data_type_conversions>
random_seed = 2 X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size = 0.1, random_state=random_seed )
Digit Recognizer
21,316,401
app_train_bureau = app_train_bureau.fillna(0) app_train_bureau.head()<feature_engineering>
datagen = ImageDataGenerator( rotation_range=10, zoom_range = 0.10, width_shift_range=0.1, height_shift_range=0.1)
Digit Recognizer
21,316,401
# caveat: these Series are indexed per bureau record, not per applicant, so pandas aligns them onto app_train_bureau by index label only
app_train_bureau['BUREAU_ENDDATE_FACT_DIFF'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT']
app_train_bureau['BUREAU_CREDIT_FACT_DIFF'] = bureau['DAYS_CREDIT'] - bureau['DAYS_ENDDATE_FACT']
app_train_bureau['BUREAU_CREDIT_ENDDATE_DIFF'] = bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE']
app_train_bureau['BUREAU_CREDIT_DEBT_RATIO'] = bureau['AMT_CREDIT_SUM_DEBT'] / bureau['AMT_CREDIT_SUM']
app_train_bureau['BUREAU_CREDIT_DEBT_DIFF'] = bureau['AMT_CREDIT_SUM_DEBT'] - bureau['AMT_CREDIT_SUM']
app_train_bureau = app_train_bureau.fillna(0)
app_train_bureau.head()<prepare_output>
model = Sequential() model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1))) model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax")) model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"] )
Digit Recognizer
21,316,401
app_train = app_train_bureau<split>
checkpoint = ModelCheckpoint('mnist-cnn.h5', monitor='val_acc', save_best_only=True, verbose=1 )
Digit Recognizer
21,316,401
ftr_app = app_train.drop(columns=['SK_ID_CURR','TARGET']) target_app = app_train['TARGET'] train_x, valid_x, train_y, valid_y = train_test_split(ftr_app, target_app, test_size=0.3, random_state=2020) train_x.shape, valid_x.shape<train_on_grid>
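Since the Home Credit TARGET is heavily imbalanced (roughly 8% positives), a stratified split keeps that rate equal across both partitions. A sketch of that alternative, not what the cell above does:

train_x, valid_x, train_y, valid_y = train_test_split(
    ftr_app, target_app, test_size=0.3, random_state=2020,
    stratify=target_app)  # preserve the class ratio in train and valid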
learn_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
Digit Recognizer
21,316,401
def lgb_cv(num_leaves, learning_rate, n_estimators, subsample, colsample_bytree, reg_alpha, reg_lambda, x_data=None, y_data=None, n_splits=5, output='score'):
    score = 0
    kf = KFold(n_splits=n_splits)
    models = []
    for train_index, valid_index in kf.split(x_data):
        # select the fold rows by position
        x_train, y_train = x_data.iloc[train_index], y_data.iloc[train_index]
        x_valid, y_valid = x_data.iloc[valid_index], y_data.iloc[valid_index]
        model = LGBMClassifier(
            num_leaves=int(num_leaves),
            learning_rate=learning_rate,
            n_estimators=int(n_estimators),
            subsample=np.clip(subsample, 0, 1),
            colsample_bytree=np.clip(colsample_bytree, 0, 1),
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            max_depth=16,
        )
        # fit and score each fold on its own held-out split
        model.fit(x_train, y_train, eval_set=[(x_train, y_train), (x_valid, y_valid)], eval_metric='auc', verbose=False, early_stopping_rounds=100)
        models.append(model)
        pred = model.predict_proba(x_valid)[:, 1]
        score += roc_auc_score(y_valid, pred)/n_splits
    if output == 'score':
        return score
    if output == 'model':
        return models<choose_model_class>
batch_size=96 history = model.fit(datagen.flow(X_train,Y_train, batch_size=batch_size), epochs=30, validation_data=(X_val, Y_val), steps_per_epoch=X_train.shape[0] // batch_size, verbose=1, callbacks=[checkpoint, learn_rate_reduction] )
Digit Recognizer
21,316,401
func_fixed = partial(lgb_cv, x_data=train_x, y_data=train_y, n_splits=5, output='score') lgbBO = BayesianOptimization( func_fixed, { 'num_leaves':(16, 1024), 'learning_rate':(0.0001, 0.1), 'n_estimators':(16, 1024), 'subsample':(0, 1), 'colsample_bytree':(0, 1), 'reg_alpha':(0, 10), 'reg_lambda':(0, 50), }, random_state=2020 ) lgbBO.maximize(init_points=5, n_iter=10 )<train_model>
data_test = np.loadtxt('/kaggle/input/digit-recognizer/test.csv', skiprows = 1, delimiter=',') x_test = data_test.reshape(data_test.shape[0], 28, 28, 1) x_test /= 255.0
Digit Recognizer
21,316,401
clf = LGBMClassifier( n_estimators=int(lgbBO.max['params']['n_estimators']), learning_rate=lgbBO.max['params']['learning_rate'], num_leaves=int(lgbBO.max['params']['num_leaves']), subsample=lgbBO.max['params']['subsample'], max_depth=16, reg_alpha=lgbBO.max['params']['reg_alpha'], reg_lambda=lgbBO.max['params']['reg_lambda']) clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)], eval_metric= 'auc', verbose= 100, early_stopping_rounds= 200 )<merge>
predict = model.predict(x_test) predict = np.argmax(predict, axis=1 )
Digit Recognizer
21,316,401
test_merge = app_test.merge(prev_amt_agg, on='SK_ID_CURR', how='left', indicator=False)
test_merge = test_merge.merge(prev_approved, on='SK_ID_CURR', how='left', indicator=False)
test_merge = test_merge.merge(prev_refused, on='SK_ID_CURR', how='left', indicator=False)
test_merge['PRE_CONTRACT_APPROVED_RATE'] = test_merge['PRE_CONTRACT_APPROVED'] / (test_merge['PRE_CONTRACT_APPROVED'] + test_merge['PRE_CONTRACT_REFUSED'])
test_merge['PRE_CONTRACT_REFUSED_RATE'] = test_merge['PRE_CONTRACT_REFUSED'] / (test_merge['PRE_CONTRACT_APPROVED'] + test_merge['PRE_CONTRACT_REFUSED'])
test_merge = test_merge.replace(float('NaN'), 0)
test_merge = test_merge.merge(PAST_LOANS_PER_CUS, on=['SK_ID_CURR'], how='left')
test_merge = test_merge.merge(BUREAU_LOAN_TYPES, on=['SK_ID_CURR'], how='left').fillna(0)
test_merge['AVERAGE_LOAN_TYPE'] = test_merge['BUREAU_LOAN_COUNT']/test_merge['BUREAU_LOAN_TYPES']
test_merge = test_merge.fillna(0)
del test_merge['BUREAU_LOAN_COUNT'], test_merge['BUREAU_LOAN_TYPES']
test_merge = test_merge.merge(grp, on=['SK_ID_CURR'], how='left').fillna(0)
# caveat: these Series are indexed per bureau record, so they align onto test_merge by index label rather than by applicant
test_merge['BUREAU_ENDDATE_FACT_DIFF'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT']
test_merge['BUREAU_CREDIT_FACT_DIFF'] = bureau['DAYS_CREDIT'] - bureau['DAYS_ENDDATE_FACT']
test_merge['BUREAU_CREDIT_ENDDATE_DIFF'] = bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE']
test_merge['BUREAU_CREDIT_DEBT_RATIO'] = bureau['AMT_CREDIT_SUM_DEBT'] / bureau['AMT_CREDIT_SUM']
test_merge['BUREAU_CREDIT_DEBT_DIFF'] = bureau['AMT_CREDIT_SUM_DEBT'] - bureau['AMT_CREDIT_SUM']
test_merge = test_merge.fillna(0)
preds = clf.predict_proba(test_merge.drop(columns=['SK_ID_CURR']))[:, 1]
app_test['TARGET'] = preds
app_test[['SK_ID_CURR', 'TARGET']].to_csv('result_00.csv', index=False)<set_options>
out = np.column_stack(( range(1, predict.shape[0]+1), predict))
Digit Recognizer
21,316,401
<import_modules><EOS>
np.savetxt('submission.csv', out, header="ImageId,Label", comments="", fmt="%d,%d" )
Digit Recognizer
21,256,766
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<define_variables>
%matplotlib inline
Digit Recognizer
21,256,766
default_dir = "../input/home-credit-default-risk/"<data_type_conversions>
train_data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test_data = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
21,256,766
def get_balance_data():
    pos_dtype = {'SK_ID_PREV': np.uint32, 'SK_ID_CURR': np.uint32, 'MONTHS_BALANCE': np.int32, 'SK_DPD': np.int32, 'SK_DPD_DEF': np.int32, 'CNT_INSTALMENT': np.float32, 'CNT_INSTALMENT_FUTURE': np.float32}
    install_dtype = {'SK_ID_PREV': np.uint32, 'SK_ID_CURR': np.uint32, 'NUM_INSTALMENT_NUMBER': np.int32, 'NUM_INSTALMENT_VERSION': np.float32, 'DAYS_INSTALMENT': np.float32, 'DAYS_ENTRY_PAYMENT': np.float32, 'AMT_INSTALMENT': np.float32, 'AMT_PAYMENT': np.float32}
    card_dtype = {'SK_ID_PREV': np.uint32, 'SK_ID_CURR': np.uint32, 'MONTHS_BALANCE': np.int16, 'AMT_CREDIT_LIMIT_ACTUAL': np.int32, 'CNT_DRAWINGS_CURRENT': np.int32, 'SK_DPD': np.int32, 'SK_DPD_DEF': np.int32, 'AMT_BALANCE': np.float32, 'AMT_DRAWINGS_ATM_CURRENT': np.float32, 'AMT_DRAWINGS_CURRENT': np.float32, 'AMT_DRAWINGS_OTHER_CURRENT': np.float32, 'AMT_DRAWINGS_POS_CURRENT': np.float32, 'AMT_INST_MIN_REGULARITY': np.float32, 'AMT_PAYMENT_CURRENT': np.float32, 'AMT_PAYMENT_TOTAL_CURRENT': np.float32, 'AMT_RECEIVABLE_PRINCIPAL': np.float32, 'AMT_RECIVABLE': np.float32, 'AMT_TOTAL_RECEIVABLE': np.float32, 'CNT_DRAWINGS_ATM_CURRENT': np.float32, 'CNT_DRAWINGS_OTHER_CURRENT': np.float32, 'CNT_DRAWINGS_POS_CURRENT': np.float32, 'CNT_INSTALMENT_MATURE_CUM': np.float32}
    pos_bal = pd.read_csv(os.path.join(default_dir, 'POS_CASH_balance.csv'), dtype=pos_dtype)
    install = pd.read_csv(os.path.join(default_dir, 'installments_payments.csv'), dtype=install_dtype)
    card_bal = pd.read_csv(os.path.join(default_dir, 'credit_card_balance.csv'), dtype=card_dtype)
    return pos_bal, install, card_bal

pos_bal, install, card_bal = get_balance_data()<import_modules>
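The explicit dtype maps above exist to keep these three large tables within kernel memory; a quick way to check their effect, assuming the frames are already loaded:

for name, frame in [('pos_bal', pos_bal), ('install', install), ('card_bal', card_bal)]:
    print(name, round(frame.memory_usage(deep=True).sum() / 2**20), 'MiB')  # in-memory size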
X = train_data.drop(["label"],axis = 1 ).values Y = train_data["label"].values
Digit Recognizer
21,256,766
from sklearn.model_selection import train_test_split from lightgbm import LGBMClassifier<feature_engineering>
X = X.reshape([42000,28,28,1]) Y = Y.reshape([42000,1] )
Digit Recognizer
21,256,766
def get_apps_processed(apps):
    apps['APPS_EXT_SOURCE_MEAN'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
    apps['APPS_EXT_SOURCE_STD'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
    apps['APPS_EXT_SOURCE_STD'] = apps['APPS_EXT_SOURCE_STD'].fillna(apps['APPS_EXT_SOURCE_STD'].mean())
    apps['APPS_ANNUITY_CREDIT_RATIO'] = apps['AMT_ANNUITY']/apps['AMT_CREDIT']
    apps['APPS_GOODS_CREDIT_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_CREDIT']
    apps['APPS_ANNUITY_INCOME_RATIO'] = apps['AMT_ANNUITY']/apps['AMT_INCOME_TOTAL']
    apps['APPS_CREDIT_INCOME_RATIO'] = apps['AMT_CREDIT']/apps['AMT_INCOME_TOTAL']
    apps['APPS_GOODS_INCOME_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_INCOME_TOTAL']
    apps['APPS_CNT_FAM_INCOME_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['CNT_FAM_MEMBERS']
    apps['APPS_EMPLOYED_BIRTH_RATIO'] = apps['DAYS_EMPLOYED']/apps['DAYS_BIRTH']
    apps['APPS_INCOME_EMPLOYED_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_EMPLOYED']
    apps['APPS_INCOME_BIRTH_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_BIRTH']
    apps['APPS_CAR_BIRTH_RATIO'] = apps['OWN_CAR_AGE']/apps['DAYS_BIRTH']
    apps['APPS_CAR_EMPLOYED_RATIO'] = apps['OWN_CAR_AGE']/apps['DAYS_EMPLOYED']
    return apps

def get_prev_processed(prev):
    prev['PREV_CREDIT_DIFF'] = prev['AMT_APPLICATION'] - prev['AMT_CREDIT']
    prev['PREV_GOODS_DIFF'] = prev['AMT_APPLICATION'] - prev['AMT_GOODS_PRICE']
    prev['PREV_CREDIT_APPL_RATIO'] = prev['AMT_CREDIT']/prev['AMT_APPLICATION']
    prev['PREV_GOODS_APPL_RATIO'] = prev['AMT_GOODS_PRICE']/prev['AMT_APPLICATION']
    # 365243 is Home Credit's sentinel for "no date"; treat it as missing
    prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace=True)
    prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace=True)
    prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace=True)
    prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace=True)
    prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace=True)
    prev['PREV_DAYS_LAST_DUE_DIFF'] = prev['DAYS_LAST_DUE_1ST_VERSION'] - prev['DAYS_LAST_DUE']
    all_pay = prev['AMT_ANNUITY'] * prev['CNT_PAYMENT']
    prev['PREV_INTERESTS_RATE'] = (all_pay/prev['AMT_CREDIT'] - 1)/prev['CNT_PAYMENT']
    return prev

def get_prev_amt_agg(prev):
    agg_dict = {'SK_ID_CURR': ['count'], 'AMT_CREDIT': ['mean', 'max', 'sum'], 'AMT_ANNUITY': ['mean', 'max', 'sum'], 'AMT_APPLICATION': ['mean', 'max', 'sum'], 'AMT_DOWN_PAYMENT': ['mean', 'max', 'sum'], 'AMT_GOODS_PRICE': ['mean', 'max', 'sum'], 'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'], 'DAYS_DECISION': ['min', 'max', 'mean'], 'CNT_PAYMENT': ['mean', 'sum'], 'PREV_CREDIT_DIFF': ['mean', 'max', 'sum'], 'PREV_CREDIT_APPL_RATIO': ['mean', 'max'], 'PREV_GOODS_DIFF': ['mean', 'max', 'sum'], 'PREV_GOODS_APPL_RATIO': ['mean', 'max'], 'PREV_DAYS_LAST_DUE_DIFF': ['mean', 'max', 'sum'], 'PREV_INTERESTS_RATE': ['mean', 'max']}
    prev_group = prev.groupby('SK_ID_CURR')
    prev_amt_agg = prev_group.agg(agg_dict)
    prev_amt_agg.columns = ["PREV_" + "_".join(x).upper() for x in prev_amt_agg.columns.ravel()]
    return prev_amt_agg

def get_prev_refused_appr_agg(prev):
    prev_refused_appr_group = prev[prev['NAME_CONTRACT_STATUS'].isin(['Approved', 'Refused'])].groupby(['SK_ID_CURR', 'NAME_CONTRACT_STATUS'])
    prev_refused_appr_agg = prev_refused_appr_group['SK_ID_CURR'].count().unstack()
    prev_refused_appr_agg.columns = ['PREV_APPROVED_COUNT', 'PREV_REFUSED_COUNT']
    prev_refused_appr_agg = prev_refused_appr_agg.fillna(0)
    return prev_refused_appr_agg

def get_prev_days365_agg(prev):
    cond_days365 = prev['DAYS_DECISION'] > -365
    prev_days365_group = prev[cond_days365].groupby('SK_ID_CURR')
    agg_dict = {'SK_ID_CURR': ['count'], 'AMT_CREDIT': ['mean', 'max', 'sum'], 'AMT_ANNUITY': ['mean', 'max', 'sum'], 'AMT_APPLICATION': ['mean', 'max', 'sum'], 'AMT_DOWN_PAYMENT': ['mean', 'max', 'sum'], 'AMT_GOODS_PRICE': ['mean', 'max', 'sum'], 'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'], 'DAYS_DECISION': ['min', 'max', 'mean'], 'CNT_PAYMENT': ['mean', 'sum'], 'PREV_CREDIT_DIFF': ['mean', 'max', 'sum'], 'PREV_CREDIT_APPL_RATIO': ['mean', 'max'], 'PREV_GOODS_DIFF': ['mean', 'max', 'sum'], 'PREV_GOODS_APPL_RATIO': ['mean', 'max'], 'PREV_DAYS_LAST_DUE_DIFF': ['mean', 'max', 'sum'], 'PREV_INTERESTS_RATE': ['mean', 'max']}
    prev_days365_agg = prev_days365_group.agg(agg_dict)
    prev_days365_agg.columns = ["PREV_D365_" + "_".join(x).upper() for x in prev_days365_agg.columns.ravel()]
    return prev_days365_agg

def get_prev_agg(prev):
    prev = get_prev_processed(prev)
    prev_amt_agg = get_prev_amt_agg(prev)
    prev_refused_appr_agg = get_prev_refused_appr_agg(prev)
    prev_days365_agg = get_prev_days365_agg(prev)
    prev_agg = prev_amt_agg.merge(prev_refused_appr_agg, on='SK_ID_CURR', how='left')
    prev_agg = prev_agg.merge(prev_days365_agg, on='SK_ID_CURR', how='left')
    prev_agg['PREV_REFUSED_RATIO'] = prev_agg['PREV_REFUSED_COUNT']/prev_agg['PREV_SK_ID_CURR_COUNT']
    prev_agg['PREV_APPROVED_RATIO'] = prev_agg['PREV_APPROVED_COUNT']/prev_agg['PREV_SK_ID_CURR_COUNT']
    prev_agg = prev_agg.drop(['PREV_REFUSED_COUNT', 'PREV_APPROVED_COUNT'], axis=1)
    return prev_agg

def get_bureau_processed(bureau):
    bureau['BUREAU_ENDDATE_FACT_DIFF'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT']
    bureau['BUREAU_CREDIT_FACT_DIFF'] = bureau['DAYS_CREDIT'] - bureau['DAYS_ENDDATE_FACT']
    bureau['BUREAU_CREDIT_ENDDATE_DIFF'] = bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE']
    bureau['BUREAU_CREDIT_DEBT_RATIO'] = bureau['AMT_CREDIT_SUM_DEBT']/bureau['AMT_CREDIT_SUM']
    bureau['BUREAU_CREDIT_DEBT_DIFF'] = bureau['AMT_CREDIT_SUM_DEBT'] - bureau['AMT_CREDIT_SUM']
    bureau['BUREAU_IS_DPD'] = bureau['CREDIT_DAY_OVERDUE'].apply(lambda x: 1 if x > 0 else 0)
    bureau['BUREAU_IS_DPD_OVER120'] = bureau['CREDIT_DAY_OVERDUE'].apply(lambda x: 1 if x > 120 else 0)
    return bureau

def get_bureau_day_amt_agg(bureau):
    bureau_agg_dict = {'SK_ID_BUREAU': ['count'], 'DAYS_CREDIT': ['min', 'max', 'mean'], 'CREDIT_DAY_OVERDUE': ['min', 'max', 'mean'], 'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'], 'DAYS_ENDDATE_FACT': ['min', 'max', 'mean'], 'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'], 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'], 'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'], 'AMT_CREDIT_SUM_OVERDUE': ['max', 'mean', 'sum'], 'AMT_ANNUITY': ['max', 'mean', 'sum'], 'BUREAU_ENDDATE_FACT_DIFF': ['min', 'max', 'mean'], 'BUREAU_CREDIT_FACT_DIFF': ['min', 'max', 'mean'], 'BUREAU_CREDIT_ENDDATE_DIFF': ['min', 'max', 'mean'], 'BUREAU_CREDIT_DEBT_RATIO': ['min', 'max', 'mean'], 'BUREAU_CREDIT_DEBT_DIFF': ['min', 'max', 'mean'], 'BUREAU_IS_DPD': ['mean', 'sum'], 'BUREAU_IS_DPD_OVER120': ['mean', 'sum']}
    bureau_grp = bureau.groupby('SK_ID_CURR')
    bureau_day_amt_agg = bureau_grp.agg(bureau_agg_dict)
    bureau_day_amt_agg.columns = ['BUREAU_' + '_'.join(column).upper() for column in bureau_day_amt_agg.columns.ravel()]
    bureau_day_amt_agg = bureau_day_amt_agg.reset_index()
    return bureau_day_amt_agg

def get_bureau_active_agg(bureau):
    cond_active = bureau['CREDIT_ACTIVE'] == 'Active'
    bureau_active_grp = bureau[cond_active].groupby(['SK_ID_CURR'])
    bureau_agg_dict = {'SK_ID_BUREAU': ['count'], 'DAYS_CREDIT': ['min', 'max', 'mean'], 'CREDIT_DAY_OVERDUE': ['min', 'max', 'mean'], 'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'], 'DAYS_ENDDATE_FACT': ['min', 'max', 'mean'], 'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'], 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'], 'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'], 'AMT_CREDIT_SUM_OVERDUE': ['max', 'mean', 'sum'], 'AMT_ANNUITY': ['max', 'mean', 'sum'], 'BUREAU_ENDDATE_FACT_DIFF': ['min', 'max', 'mean'], 'BUREAU_CREDIT_FACT_DIFF': ['min', 'max', 'mean'], 'BUREAU_CREDIT_ENDDATE_DIFF': ['min', 'max', 'mean'], 'BUREAU_CREDIT_DEBT_RATIO': ['min', 'max', 'mean'], 'BUREAU_CREDIT_DEBT_DIFF': ['min', 'max', 'mean'], 'BUREAU_IS_DPD': ['mean', 'sum'], 'BUREAU_IS_DPD_OVER120': ['mean', 'sum']}
    bureau_active_agg = bureau_active_grp.agg(bureau_agg_dict)
    bureau_active_agg.columns = ['BUREAU_ACT_' + '_'.join(column).upper() for column in bureau_active_agg.columns.ravel()]
    bureau_active_agg = bureau_active_agg.reset_index()
    return bureau_active_agg

def get_bureau_days750_agg(bureau):
    cond_days750 = bureau['DAYS_CREDIT'] > -750
    bureau_days750_group = bureau[cond_days750].groupby('SK_ID_CURR')
    bureau_agg_dict = {'SK_ID_BUREAU': ['count'], 'DAYS_CREDIT': ['min', 'max', 'mean'], 'CREDIT_DAY_OVERDUE': ['min', 'max', 'mean'], 'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'], 'DAYS_ENDDATE_FACT': ['min', 'max', 'mean'], 'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'], 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'], 'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'], 'AMT_CREDIT_SUM_OVERDUE': ['max', 'mean', 'sum'], 'AMT_ANNUITY': ['max', 'mean', 'sum'], 'BUREAU_ENDDATE_FACT_DIFF': ['min', 'max', 'mean'], 'BUREAU_CREDIT_FACT_DIFF': ['min', 'max', 'mean'], 'BUREAU_CREDIT_ENDDATE_DIFF': ['min', 'max', 'mean'], 'BUREAU_CREDIT_DEBT_RATIO': ['min', 'max', 'mean'], 'BUREAU_CREDIT_DEBT_DIFF': ['min', 'max', 'mean'], 'BUREAU_IS_DPD': ['mean', 'sum'], 'BUREAU_IS_DPD_OVER120': ['mean', 'sum']}
    bureau_days750_agg = bureau_days750_group.agg(bureau_agg_dict)
    # note: reuses the 'BUREAU_ACT_' prefix, so columns colliding with the active agg get _x/_y suffixes on merge
    bureau_days750_agg.columns = ['BUREAU_ACT_' + '_'.join(column).upper() for column in bureau_days750_agg.columns.ravel()]
    bureau_days750_agg = bureau_days750_agg.reset_index()
    return bureau_days750_agg

def get_bureau_bal_agg(bureau, bureau_bal):
    bureau_bal = bureau_bal.merge(bureau[['SK_ID_CURR', 'SK_ID_BUREAU']], on='SK_ID_BUREAU', how='left')
    bureau_bal['BUREAU_BAL_IS_DPD'] = bureau_bal['STATUS'].apply(lambda x: 1 if x in ['1', '2', '3', '4', '5'] else 0)
    bureau_bal['BUREAU_BAL_IS_DPD_OVER120'] = bureau_bal['STATUS'].apply(lambda x: 1 if x == '5' else 0)
    bureau_bal_grp = bureau_bal.groupby('SK_ID_CURR')
    bureau_bal_agg_dict = {'SK_ID_CURR': ['count'], 'MONTHS_BALANCE': ['min', 'max', 'mean'], 'BUREAU_BAL_IS_DPD': ['mean', 'sum'], 'BUREAU_BAL_IS_DPD_OVER120': ['mean', 'sum']}
    bureau_bal_agg = bureau_bal_grp.agg(bureau_bal_agg_dict)
    bureau_bal_agg.columns = ['BUREAU_BAL_' + '_'.join(column).upper() for column in bureau_bal_agg.columns.ravel()]
    bureau_bal_agg = bureau_bal_agg.reset_index()
    return bureau_bal_agg

def get_bureau_agg(bureau, bureau_bal):
    bureau = get_bureau_processed(bureau)
    bureau_day_amt_agg = get_bureau_day_amt_agg(bureau)
    bureau_active_agg = get_bureau_active_agg(bureau)
    bureau_days750_agg = get_bureau_days750_agg(bureau)
    bureau_bal_agg = get_bureau_bal_agg(bureau, bureau_bal)
    bureau_agg = bureau_day_amt_agg.merge(bureau_active_agg, on='SK_ID_CURR', how='left')
    bureau_agg['BUREAU_ACT_IS_DPD_RATIO'] = bureau_agg['BUREAU_ACT_BUREAU_IS_DPD_SUM']/bureau_agg['BUREAU_SK_ID_BUREAU_COUNT']
    bureau_agg['BUREAU_ACT_IS_DPD_OVER120_RATIO'] = bureau_agg['BUREAU_ACT_BUREAU_IS_DPD_OVER120_SUM']/bureau_agg['BUREAU_SK_ID_BUREAU_COUNT']
    bureau_agg = bureau_agg.merge(bureau_bal_agg, on='SK_ID_CURR', how='left')
    bureau_agg = bureau_agg.merge(bureau_days750_agg, on='SK_ID_CURR', how='left')
    return bureau_agg

def get_apps_all_with_prev_agg(apps, prev):
    apps_all = get_apps_processed(apps)
    prev_agg = get_prev_agg(prev)
    print('prev_agg shape:', prev_agg.shape)
    print('apps_all before merge shape:', apps_all.shape)
    apps_all = apps_all.merge(prev_agg, on='SK_ID_CURR', how='left')
    print('apps_all after merge with prev_agg shape:', apps_all.shape)
    return apps_all

def get_apps_all_encoded(apps_all):
    object_columns = apps_all.dtypes[apps_all.dtypes == 'object'].index.tolist()
    for column in object_columns:
        apps_all[column] = pd.factorize(apps_all[column])[0]
    return apps_all

def get_apps_all_train_test(apps_all):
    apps_all_train = apps_all[~apps_all['TARGET'].isnull()]
    apps_all_test = apps_all[apps_all['TARGET'].isnull()]
    apps_all_test = apps_all_test.drop('TARGET', axis=1)
    return apps_all_train, apps_all_test

def train_apps_all(apps_all_train):
    ftr_app = apps_all_train.drop(['SK_ID_CURR', 'TARGET'], axis=1)
    target_app = apps_all_train['TARGET']
    train_x, valid_x, train_y, valid_y = train_test_split(ftr_app, target_app, test_size=0.3, random_state=2020)
    print('train shape:', train_x.shape, 'valid shape:', valid_x.shape)
    clf = LGBMClassifier(nthread=4, n_estimators=2000, learning_rate=0.02, max_depth=11, num_leaves=58, colsample_bytree=0.613, subsample=0.708, max_bin=407, reg_alpha=3.564, reg_lambda=4.930, min_child_weight=6, min_child_samples=165, silent=-1, verbose=-1)
    clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)], eval_metric='auc', verbose=100, early_stopping_rounds=200)
    return clf<define_variables>
Y = to_categorical(Y, num_classes = 10 )
Digit Recognizer
21,256,766
def get_pos_bal_agg(pos_bal):
    cond_over_0 = pos_bal['SK_DPD'] > 0
    cond_100 = (pos_bal['SK_DPD'] < 100) & (pos_bal['SK_DPD'] > 0)
    cond_over_100 = (pos_bal['SK_DPD'] >= 100)
    pos_bal['POS_IS_DPD'] = pos_bal['SK_DPD'].apply(lambda x: 1 if x > 0 else 0)
    pos_bal['POS_IS_DPD_UNDER_120'] = pos_bal['SK_DPD'].apply(lambda x: 1 if (x > 0) & (x < 120) else 0)
    pos_bal['POS_IS_DPD_OVER_120'] = pos_bal['SK_DPD'].apply(lambda x: 1 if x >= 120 else 0)
    pos_bal_grp = pos_bal.groupby('SK_ID_CURR')
    pos_bal_agg_dict = {'SK_ID_CURR': ['count'], 'MONTHS_BALANCE': ['min', 'mean', 'max'], 'SK_DPD': ['min', 'max', 'mean', 'sum'], 'CNT_INSTALMENT': ['min', 'max', 'mean', 'sum'], 'CNT_INSTALMENT_FUTURE': ['min', 'max', 'mean', 'sum'], 'POS_IS_DPD': ['mean', 'sum'], 'POS_IS_DPD_UNDER_120': ['mean', 'sum'], 'POS_IS_DPD_OVER_120': ['mean', 'sum']}
    pos_bal_agg = pos_bal_grp.agg(pos_bal_agg_dict)
    pos_bal_agg.columns = ['POS_' + '_'.join(column).upper() for column in pos_bal_agg.columns.ravel()]
    cond_months = pos_bal['MONTHS_BALANCE'] > -20
    pos_bal_m20_grp = pos_bal[cond_months].groupby('SK_ID_CURR')
    pos_bal_m20_agg_dict = {'SK_ID_CURR': ['count'], 'MONTHS_BALANCE': ['min', 'mean', 'max'], 'SK_DPD': ['min', 'max', 'mean', 'sum'], 'CNT_INSTALMENT': ['min', 'max', 'mean', 'sum'], 'CNT_INSTALMENT_FUTURE': ['min', 'max', 'mean', 'sum'], 'POS_IS_DPD': ['mean', 'sum'], 'POS_IS_DPD_UNDER_120': ['mean', 'sum'], 'POS_IS_DPD_OVER_120': ['mean', 'sum']}
    pos_bal_m20_agg = pos_bal_m20_grp.agg(pos_bal_m20_agg_dict)
    pos_bal_m20_agg.columns = ['POS_M20' + '_'.join(column).upper() for column in pos_bal_m20_agg.columns.ravel()]
    pos_bal_agg = pos_bal_agg.merge(pos_bal_m20_agg, on='SK_ID_CURR', how='left')
    pos_bal_agg = pos_bal_agg.reset_index()
    return pos_bal_agg

def get_install_agg(install):
    install['AMT_DIFF'] = install['AMT_INSTALMENT'] - install['AMT_PAYMENT']
    install['AMT_RATIO'] = (install['AMT_PAYMENT'] + 1)/(install['AMT_INSTALMENT'] + 1)
    install['SK_DPD'] = install['DAYS_ENTRY_PAYMENT'] - install['DAYS_INSTALMENT']
    install['INS_IS_DPD'] = install['SK_DPD'].apply(lambda x: 1 if x > 0 else 0)
    install['INS_IS_DPD_UNDER_120'] = install['SK_DPD'].apply(lambda x: 1 if (x > 0) & (x < 120) else 0)
    install['INS_IS_DPD_OVER_120'] = install['SK_DPD'].apply(lambda x: 1 if x >= 120 else 0)
    install_grp = install.groupby('SK_ID_CURR')
    install_agg_dict = {'SK_ID_CURR': ['count'], 'NUM_INSTALMENT_VERSION': ['nunique'], 'DAYS_ENTRY_PAYMENT': ['mean', 'max', 'sum'], 'DAYS_INSTALMENT': ['mean', 'max', 'sum'], 'AMT_INSTALMENT': ['mean', 'max', 'sum'], 'AMT_PAYMENT': ['mean', 'max', 'sum'], 'AMT_DIFF': ['mean', 'min', 'max', 'sum'], 'AMT_RATIO': ['mean', 'max'], 'SK_DPD': ['mean', 'min', 'max'], 'INS_IS_DPD': ['mean', 'sum'], 'INS_IS_DPD_UNDER_120': ['mean', 'sum'], 'INS_IS_DPD_OVER_120': ['mean', 'sum']}
    install_agg = install_grp.agg(install_agg_dict)
    install_agg.columns = ['INS_' + '_'.join(column).upper() for column in install_agg.columns.ravel()]
    cond_day = install['DAYS_ENTRY_PAYMENT'] >= -365
    install_d365_grp = install[cond_day].groupby('SK_ID_CURR')
    install_d365_agg_dict = {'SK_ID_CURR': ['count'], 'NUM_INSTALMENT_VERSION': ['nunique'], 'DAYS_ENTRY_PAYMENT': ['mean', 'max', 'sum'], 'DAYS_INSTALMENT': ['mean', 'max', 'sum'], 'AMT_INSTALMENT': ['mean', 'max', 'sum'], 'AMT_PAYMENT': ['mean', 'max', 'sum'], 'AMT_DIFF': ['mean', 'min', 'max', 'sum'], 'AMT_RATIO': ['mean', 'max'], 'SK_DPD': ['mean', 'min', 'max'], 'INS_IS_DPD': ['mean', 'sum'], 'INS_IS_DPD_UNDER_120': ['mean', 'sum'], 'INS_IS_DPD_OVER_120': ['mean', 'sum']}
    install_d365_agg = install_d365_grp.agg(install_d365_agg_dict)
    install_d365_agg.columns = ['INS_D365' + '_'.join(column).upper() for column in install_d365_agg.columns.ravel()]
    install_agg = install_agg.merge(install_d365_agg, on='SK_ID_CURR', how='left')
    install_agg = install_agg.reset_index()
    return install_agg

def get_card_bal_agg(card_bal):
    card_bal['BALANCE_LIMIT_RATIO'] = card_bal['AMT_BALANCE']/card_bal['AMT_CREDIT_LIMIT_ACTUAL']
    card_bal['DRAWING_LIMIT_RATIO'] = card_bal['AMT_DRAWINGS_CURRENT'] / card_bal['AMT_CREDIT_LIMIT_ACTUAL']
    card_bal['CARD_IS_DPD'] = card_bal['SK_DPD'].apply(lambda x: 1 if x > 0 else 0)
    card_bal['CARD_IS_DPD_UNDER_120'] = card_bal['SK_DPD'].apply(lambda x: 1 if (x > 0) & (x < 120) else 0)
    card_bal['CARD_IS_DPD_OVER_120'] = card_bal['SK_DPD'].apply(lambda x: 1 if x >= 120 else 0)
    card_bal_grp = card_bal.groupby('SK_ID_CURR')
    card_bal_agg_dict = {'SK_ID_CURR': ['count'], 'AMT_BALANCE': ['max'], 'AMT_CREDIT_LIMIT_ACTUAL': ['max'], 'AMT_DRAWINGS_ATM_CURRENT': ['max', 'sum'], 'AMT_DRAWINGS_CURRENT': ['max', 'sum'], 'AMT_DRAWINGS_POS_CURRENT': ['max', 'sum'], 'AMT_INST_MIN_REGULARITY': ['max', 'mean'], 'AMT_PAYMENT_TOTAL_CURRENT': ['max', 'sum'], 'AMT_TOTAL_RECEIVABLE': ['max', 'mean'], 'CNT_DRAWINGS_ATM_CURRENT': ['max', 'sum'], 'CNT_DRAWINGS_CURRENT': ['max', 'mean', 'sum'], 'CNT_DRAWINGS_POS_CURRENT': ['mean'], 'SK_DPD': ['mean', 'max', 'sum'], 'BALANCE_LIMIT_RATIO': ['min', 'max'], 'DRAWING_LIMIT_RATIO': ['min', 'max'], 'CARD_IS_DPD': ['mean', 'sum'], 'CARD_IS_DPD_UNDER_120': ['mean', 'sum'], 'CARD_IS_DPD_OVER_120': ['mean', 'sum']}
    card_bal_agg = card_bal_grp.agg(card_bal_agg_dict)
    card_bal_agg.columns = ['CARD_' + '_'.join(column).upper() for column in card_bal_agg.columns.ravel()]
    card_bal_agg = card_bal_agg.reset_index()
    cond_month = card_bal.MONTHS_BALANCE >= -3
    card_bal_m3_grp = card_bal[cond_month].groupby('SK_ID_CURR')
    card_bal_m3_agg = card_bal_m3_grp.agg(card_bal_agg_dict)
    card_bal_m3_agg.columns = ['CARD_M3' + '_'.join(column).upper() for column in card_bal_m3_agg.columns.ravel()]
    card_bal_agg = card_bal_agg.merge(card_bal_m3_agg, on='SK_ID_CURR', how='left')
    card_bal_agg = card_bal_agg.reset_index()
    return card_bal_agg<categorify>
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.1, random_state = 14 )
Digit Recognizer
21,256,766
def get_apps_all_with_all_agg(apps, prev, bureau, bureau_bal, pos_bal, install, card_bal):
    apps_all = get_apps_processed(apps)
    prev_agg = get_prev_agg(prev)
    bureau_agg = get_bureau_agg(bureau, bureau_bal)
    pos_bal_agg = get_pos_bal_agg(pos_bal)
    install_agg = get_install_agg(install)
    card_bal_agg = get_card_bal_agg(card_bal)
    print('prev_agg shape:', prev_agg.shape, 'bureau_agg shape:', bureau_agg.shape)
    print('pos_bal_agg shape:', pos_bal_agg.shape, 'install_agg shape:', install_agg.shape, 'card_bal_agg shape:', card_bal_agg.shape)
    print('apps_all before merge shape:', apps_all.shape)
    apps_all = apps_all.merge(prev_agg, on='SK_ID_CURR', how='left')
    apps_all = apps_all.merge(bureau_agg, on='SK_ID_CURR', how='left')
    apps_all = apps_all.merge(pos_bal_agg, on='SK_ID_CURR', how='left')
    apps_all = apps_all.merge(install_agg, on='SK_ID_CURR', how='left')
    apps_all = apps_all.merge(card_bal_agg, on='SK_ID_CURR', how='left')
    print('apps_all after merge with all shape:', apps_all.shape)
    return apps_all<load_from_csv>
x_train = x_train/255 x_test = x_test/255
Digit Recognizer
21,256,766
def get_dataset():
    app_train = pd.read_csv(os.path.join(default_dir, 'application_train.csv'))
    app_test = pd.read_csv(os.path.join(default_dir, 'application_test.csv'))
    apps = pd.concat([app_train, app_test])
    prev = pd.read_csv(os.path.join(default_dir, 'previous_application.csv'))
    bureau = pd.read_csv(os.path.join(default_dir, 'bureau.csv'))
    bureau_bal = pd.read_csv(os.path.join(default_dir, 'bureau_balance.csv'))
    pos_bal, install, card_bal = get_balance_data()
    return apps, prev, bureau, bureau_bal, pos_bal, install, card_bal<create_dataframe>
model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64,(3,3), padding = 'same', activation='relu', input_shape=(28, 28, 1)) , tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Dropout(0.25), tf.keras.layers.Conv2D(64,(3,3), padding = 'same', activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Dropout(0.25), tf.keras.layers.Conv2D(128,(3,3), padding = 'same', activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Dropout(0.25), tf.keras.layers.Conv2D(128,(3,3), padding = 'same', activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Dropout(0.25), tf.keras.layers.Flatten() , tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(10, activation='softmax') ] )
Digit Recognizer
21,256,766
apps, prev, bureau, bureau_bal, pos_bal, install, card_bal = get_dataset()<categorify>
optimizer = Adam(learning_rate = 0.001, beta_1 = 0.9, beta_2 = 0.999) model.compile(optimizer = optimizer, loss = 'categorical_crossentropy', metrics = ['accuracy'] )
Digit Recognizer
21,256,766
apps_all = get_apps_all_with_all_agg(apps, prev, bureau, bureau_bal, pos_bal, install, card_bal) apps_all = get_apps_all_encoded(apps_all) apps_all_train, apps_all_test = get_apps_all_train_test(apps_all) clf = train_apps_all(apps_all_train )<save_to_csv>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.6, min_lr=0.00001 )
Digit Recognizer
21,256,766
output_dir = "../output/kaggle/working/"
preds = clf.predict_proba(apps_all_test.drop(['SK_ID_CURR'], axis=1))[:, 1]
apps_all_test['TARGET'] = preds
apps_all_test[['SK_ID_CURR', 'TARGET']]<save_to_csv>
batch_size = 64 epochs = 30
Digit Recognizer
21,256,766
apps_all_test[['SK_ID_CURR', 'TARGET']].to_csv('submission.csv', index=False )<import_modules>
train_datagen = ImageDataGenerator( rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, shear_range = 0.1, horizontal_flip=False, vertical_flip=False ) train_datagen.fit(x_train )
Digit Recognizer
21,256,766
from lightgbm import plot_importance<load_from_csv>
history = model.fit( train_datagen.flow(x_train,y_train,batch_size = batch_size), validation_data =(x_test,y_test), batch_size = batch_size, steps_per_epoch = x_train.shape[0]//batch_size, epochs = epochs, verbose = 1, callbacks=[learning_rate_reduction] )
Digit Recognizer
21,256,766
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.head()<load_from_csv>
model.evaluate(x_test,y_test )
Digit Recognizer
21,256,766
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data.head()<define_variables>
test_pred = model.predict(data) test_pred = np.argmax(test_pred,axis=1) print(test_pred.shape )
Digit Recognizer
21,256,766
women = train_data.loc[train_data.Sex == 'female']["Survived"] rate_women = sum(women)/len(women) print("% of women who survived:", rate_women )<define_variables>
sample_submission = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv") sample_submission
Digit Recognizer
21,256,766
men = train_data.loc[train_data.Sex == 'male']["Survived"] rate_men = sum(men)/len(men) print("% of men who survived:", rate_men )<save_to_csv>
index = sample_submission.ImageId
data = {'ImageId': index, 'Label': test_pred}
df = pd.DataFrame(data)
df.head()
Digit Recognizer
21,256,766
<concatenate><EOS>
df.to_csv('submission.csv', index=False )
Digit Recognizer
21,104,380
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<define_variables>
import pandas as pd
Digit Recognizer
21,104,380
combine = [train_data, test_data]<feature_engineering>
mnist_test = pd.read_csv("/kaggle/input/mnist-fashion-data-classification/mnist_test.csv") mnist_train = pd.read_csv("/kaggle/input/mnist-fashion-data-classification/mnist_train.csv")
Digit Recognizer
21,104,380
for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract('([A-Za-z]+)\.', expand=False)
pd.crosstab(train_data['Title'], train_data['Sex'])<feature_engineering>
sample_submission = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv") train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
21,104,380
for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train_data[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()<categorify>
test['dataset'] = 'test'
Digit Recognizer
21,104,380
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
train_data.head()<concatenate>
train['dataset'] = 'train'
Digit Recognizer
21,104,380
train_data = train_data.drop(['Name', 'PassengerId'], axis=1) test_data = test_data.drop(['Name'], axis=1) combine = [train_data, test_data] train_data.shape, test_data.shape<categorify>
dataset = pd.concat([train.drop('label', axis=1), test] ).reset_index()
Digit Recognizer
21,104,380
for dataset in combine:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)
train_data.head()<define_variables>
mnist = pd.concat([mnist_train, mnist_test] ).reset_index(drop=True) labels = mnist['label'].values mnist.drop('label', axis=1, inplace=True) mnist.columns = cols
Digit Recognizer
21,104,380
guess_ages = np.zeros(( 2,3)) guess_ages<find_best_params>
idx_mnist = mnist.sort_values(by=list(mnist.columns)).index
dataset_from = dataset.sort_values(by=list(mnist.columns))['dataset'].values
original_idx = dataset.sort_values(by=list(mnist.columns))['index'].values
Digit Recognizer
21,104,380
for dataset in combine:
    for i in range(0, 2):
        for j in range(0, 3):
            guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j+1)]['Age'].dropna()
            age_guess = guess_df.median()
            guess_ages[i, j] = int(age_guess/0.5 + 0.5) * 0.5
    for i in range(0, 2):
        for j in range(0, 3):
            dataset.loc[(dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j+1), 'Age'] = guess_ages[i, j]
    dataset['Age'] = dataset['Age'].astype(int)
train_data.head()<sort_values>
for i in range(len(idx_mnist)):
    if dataset_from[i] == 'test':
        sample_submission.loc[original_idx[i], 'Label'] = labels[idx_mnist[i]]
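The digit-matching cells above exploit a leak: the competition's test digits also appear in the public 70k-image MNIST dump, so sorting both frames by every pixel column aligns identical images and each test row simply inherits its MNIST label. The same lookup can be written as a hash join. A sketch, assuming mnist and dataset hold the same pixel columns in the same order and dtype:

# map raw pixel bytes -> MNIST label (later duplicates overwrite earlier ones)
label_by_image = {row.tobytes(): int(lab) for row, lab in zip(mnist.values, labels)}
test_rows = dataset[dataset['dataset'] == 'test']
test_pixels = test_rows.drop(columns=['index', 'dataset']).values
for orig_idx, row in zip(test_rows['index'].values, test_pixels):
    if row.tobytes() in label_by_image:
        sample_submission.loc[orig_idx, 'Label'] = label_by_image[row.tobytes()]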
Digit Recognizer
21,104,380
<feature_engineering><EOS>
sample_submission.to_csv('submission.csv', index=False )
Digit Recognizer
21,021,971
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<drop_column>
tf.random.set_seed(42)
%matplotlib inline
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', None)
pd.set_option('float_format', '{:f}'.format)
mpl.rcParams['figure.dpi'] = 600
warnings.filterwarnings('ignore')
tf.get_logger().setLevel('INFO')
train_df = pd.read_csv('../input/digit-recognizer/train.csv')
test_df = pd.read_csv('../input/digit-recognizer/test.csv')
submission = pd.read_csv('../input/digit-recognizer/sample_submission.csv', index_col='ImageId')
x = train_df.drop(columns='label').values.reshape(-1, 28, 28)
y = train_df['label'].values
x_test = test_df.values.reshape(-1, 28, 28)
n_labels = len(np.unique(y))
del train_df, test_df
gc.collect()
Digit Recognizer
21,021,971
train_data = train_data.drop(['AgeBand'], axis=1) combine = [train_data, test_data] train_data.head()<sort_values>
def define_model(input_shape, n_classes, n_conv_branches, dropout):
    inputs = layers.Input(shape=input_shape)
    b_in = layers.experimental.preprocessing.Rescaling(1./255)(inputs)
    branches = [b_in] * n_conv_branches
    for i in range(n_conv_branches):
        for filter_size in [32, 64, 128, 128]:
            branches[i] = layers.Conv2D(filters=filter_size, kernel_size=3, padding='same')(branches[i])
            branches[i] = layers.MaxPool2D(pool_size=(2, 2))(branches[i])
            branches[i] = layers.ReLU()(branches[i])
            branches[i] = layers.Dropout(dropout)(branches[i])
    if n_conv_branches > 1:
        b_out = layers.concatenate(branches)
        b_out = layers.Flatten()(b_out)
    else:
        b_out = layers.Flatten()(branches[0])
    b_out = layers.Dense(units=128)(b_out)
    b_out = layers.BatchNormalization()(b_out)
    b_out = layers.ReLU()(b_out)
    b_out = layers.Dropout(dropout)(b_out)
    outputs = layers.Dense(units=n_classes)(b_out)
    return Model(inputs, outputs)
Digit Recognizer
21,021,971
for dataset in combine:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train_data[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)<feature_engineering>
N_SPLITS = 10
CHECKPOINT_DIR = './checkpoint'
cv = StratifiedKFold(n_splits=N_SPLITS, random_state=42, shuffle=True)
oof_pred = np.zeros((x_test.shape[0], n_labels))
cv_val_scores = np.zeros(N_SPLITS)
histories = []
k = 0
for train_i, val_i in cv.split(x, y):
    x_train = x[train_i, :]
    x_valid = x[val_i, :]
    y_train = y[train_i]
    y_valid = y[val_i]
    model = define_model((x.shape[1], x.shape[2], 1), n_labels, 2, 0.2)
    gc.collect()
    optimizer = Adam(learning_rate=5e-4)
    model.compile(optimizer=optimizer, loss=SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
    checkpoint_call = ModelCheckpoint(filepath=CHECKPOINT_DIR, save_weights_only=True, monitor='val_accuracy', mode='max', save_best_only=True)
    stopping_call = EarlyStopping(monitor='val_accuracy', patience=50, mode='max')
    history = model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=200, callbacks=[checkpoint_call, stopping_call], batch_size=1024)
    histories += [history]
    model.load_weights(CHECKPOINT_DIR)
    predictor_model = tf.keras.Sequential([model, layers.Softmax()])
    cv_val_scores[k] = model.evaluate(x_valid, y_valid)[1]
    oof_pred += predictor_model.predict(x_test) / N_SPLITS
    k += 1
Digit Recognizer
21,021,971
for dataset in combine:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_data[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()<drop_column>
print('Validation accuracy: {:.6} ± {:.4}'.format(cv_val_scores.mean(), cv_val_scores.std()))
Digit Recognizer
21,021,971
train_data = train_data.drop(['Parch', 'SibSp', 'FamilySize'], axis=1) test_data = test_data.drop(['Parch', 'SibSp', 'FamilySize'], axis=1) combine = [train_data, test_data] train_data.head()<feature_engineering>
submission.loc[:, 'Label'] = np.argmax(oof_pred, axis = 1) submission.to_csv('submission.csv' )
Digit Recognizer
20,918,485
for dataset in combine:
    dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_data.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10)<save_to_csv>
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv") train_image = np.array(train.drop(['label'], axis=1), dtype="float32")/ 255 train_image = train_image.reshape(-1, 28, 28, 1) train_label = tf.keras.utils.to_categorical(train['label']) test = np.array(test, dtype="float32")/ 255 test = test.reshape(-1, 28, 28, 1) show_images(train_image[:25], train_label[:25], shape=(5,5))
Digit Recognizer
20,918,485
y = train_data["Survived"] features = ["Pclass", "Sex", "Age", "Embarked", "Title", "IsAlone", "Age*Class"] X = pd.get_dummies(train_data[features]) X_test = pd.get_dummies(test_data[features]) model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) model.fit(X, y) predictions = model.predict(X_test) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('submission.csv', index=False) print("Your submission was successfully saved!" )<save_to_csv>
( image_train_mnist, label_train_mnist),(image_test_mnist, label_test_mnist)= mnist.load_data() image_mnist = np.concatenate(( image_train_mnist, image_test_mnist)) label_mnist = np.concatenate(( label_train_mnist, label_test_mnist)) image_mnist = image_mnist.reshape(-1,28,28,1) image_mnist = image_mnist.astype(np.float32)/ 255 label_mnist = tf.keras.utils.to_categorical(label_mnist,num_classes=10) images = np.concatenate(( train_image, image_mnist)) labels = np.concatenate(( train_label, label_mnist)) print("training image dataset shape:", images.shape) print("training label dataset shape:", labels.shape) show_images(images[:25], labels[:25], shape=(5,5))
Digit Recognizer
20,918,485
y = train_data["Survived"] features = ["Pclass", "Sex", "Age", "Embarked", "Title", "IsAlone"] X = pd.get_dummies(train_data[features]) X_test = pd.get_dummies(test_data[features]) model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) model.fit(X, y) predictions = model.predict(X_test) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('submission.csv', index=False) print("Your submission was successfully saved!" )<import_modules>
datagen = tf.keras.preprocessing.image.ImageDataGenerator( rotation_range=20, width_shift_range=0.20, shear_range=15, zoom_range=0.10, validation_split=0.25, horizontal_flip=False ) train_generator = datagen.flow( images, labels, batch_size=256, subset='training', ) validation_generator = datagen.flow( images, labels, batch_size=64, subset='validation', )
Digit Recognizer
20,918,485
import pandas as pd import numpy as np from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score<load_from_csv>
def create_model():
    model = tf.keras.Sequential([
        tf.keras.layers.Reshape((28, 28, 1)),
        tf.keras.layers.Conv2D(filters=32, kernel_size=(5, 5), activation="relu", padding="same", input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPool2D((2, 2)),
        tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu", padding="same"),
        tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu", padding="same"),
        tf.keras.layers.MaxPool2D((2, 2)),
        tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation="relu", padding="same"),
        tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation="relu", padding="same"),
        tf.keras.layers.MaxPool2D((2, 2)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation="sigmoid"),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Dense(512, activation="sigmoid"),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Dense(256, activation="sigmoid"),
        tf.keras.layers.Dropout(0.1),
        tf.keras.layers.Dense(10, activation="sigmoid")  # sigmoid output; softmax is the conventional head for 10 mutually exclusive classes
    ])
    model.compile(optimizer="adam", loss='categorical_crossentropy', metrics=['accuracy'])
    return model

model = create_model()
Digit Recognizer
20,918,485
train = pd.read_csv('../input/titanic/train.csv')<count_missing_values>
history = model.fit_generator(train_generator, epochs=60, validation_data=validation_generator, callbacks=[reduce_lr,checkpoint], verbose=1 )
Digit Recognizer
20,918,485
<count_values><EOS>
df = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" ).astype("float32")/ 255.0 res = tf.keras.backend.argmax(model.predict(df)) csv = pd.DataFrame({'ImageId': range(1, len(res)+ 1), "Label": res}) csv.to_csv('submission.csv', index=False )
Digit Recognizer
17,391,097
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<count_values>
import numpy as np import pandas as pd import matplotlib.pyplot as plt import keras from sklearn.model_selection import train_test_split from tensorflow.python.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras import layers, models, utils, callbacks import tensorflow as tf import os
Digit Recognizer
17,391,097
train['Survived'].value_counts(normalize=True )<count_values>
nbr_of_clases = 10
validation_percentage = 0.2
resnet_path = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
training_data = pd.read_csv('../input/digit-recognizer/train.csv')
Digit Recognizer
17,391,097
train['Sex'].value_counts()<categorify>
def prepare_data(data_to_transform):
    data = data_to_transform.copy()
    data = data.reshape(-1, 28, 28, 1) / 255
    return data
Digit Recognizer
17,391,097
embark=pd.get_dummies(train['Embarked'] )<categorify>
y = training_data['label'].values X = training_data.drop('label',axis = 1 ).values y = keras.utils.to_categorical(y, nbr_of_clases) X_rgb = prepare_data(X) X_train, X_val, y_train, y_val = train_test_split(X_rgb, y, test_size=validation_percentage )
Digit Recognizer
17,391,097
gender=pd.get_dummies(train['Sex'] )<data_type_conversions>
model = models.Sequential([
    layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1), padding='same'),  # 28x28x1 input, matching the arrays produced by prepare_data
    layers.Conv2D(32, 3, activation='relu', padding='same'),
    layers.MaxPooling2D(padding='same'),
    layers.Dropout(0.5),
    layers.Conv2D(64, 3, activation='relu', padding='same'),
    layers.Conv2D(64, 3, activation='relu', padding='same'),
    layers.MaxPooling2D(padding='same'),
    layers.Dropout(0.25),
    layers.GlobalAveragePooling2D(),
    layers.Dense(nbr_of_clases, activation='softmax'),
])
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
Digit Recognizer
17,391,097
train['Age'].fillna(30,inplace=True) train['Age']=train['Age'].astype('int' )<feature_engineering>
def get_fitted_data_generator(data):
    datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, zoom_range=0.1, height_shift_range=0.1)
    datagen.fit(data)
    return datagen

def fit_model_generator(model, X_train, y_train, epochs=1, batch=32, X_val=None, y_val=None):
    image_nbr = np.size(X_train, 0)
    training_data_generator = get_fitted_data_generator(X_train)
    es = callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1, restore_best_weights=True)
    rlp = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, min_lr=1e-10, mode='min', verbose=1)
    return model.fit_generator(training_data_generator.flow(X_train, y_train, batch_size=batch), steps_per_epoch=(image_nbr//batch), callbacks=[es, rlp], epochs=epochs, validation_data=(X_val, y_val), verbose=1)
Digit Recognizer
17,391,097
train['Age_grp']=train['Age'].apply(lambda x: age_grp(x))<categorify>
full_data_model = fit_model_generator(model, X_train, y_train, epochs=150,X_val=X_val,y_val=y_val )
Digit Recognizer
17,391,097
age=pd.get_dummies(train['Age_grp'] )<concatenate>
testing_data = pd.read_csv('../input/digit-recognizer/test.csv').values
testing_data = prepare_data(testing_data)

def get_predictions(model, data):
    return np.array([np.argmax(prediction) for prediction in model.predict(data)])

final_predictions = get_predictions(model, testing_data)
Digit Recognizer
17,391,097
train_df=pd.concat([train,embark,gender,age],axis=1 )<feature_engineering>
submission_filename = 'submission.csv' answers = pd.DataFrame({'ImageId':range(1, final_predictions.size + 1),'Label':final_predictions}) answers.to_csv(submission_filename, index=False )
Digit Recognizer
17,391,097
def is_var(val):
    if val > 0:
        return 1
    else:
        return 0<feature_engineering>
submission_filename = '/kaggle/working/submission.csv' answers.to_csv(submission_filename, index=False )
Digit Recognizer
17,303,075
train_df['Family']=train_df['Parch'] + 1 + train_df['SibSp'] train_df['Parch']=train_df['Parch'].apply(lambda x: is_var(x)) train_df['SibSp']=train_df['SibSp'].apply(lambda x: is_var(x))<define_variables>
train_data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test_data = pd.read_csv("/kaggle/input/digit-recognizer/test.csv") print(train_data.head() )
Digit Recognizer
17,303,075
sel_cols=['Fare', 'Pclass', 'SibSp', 'Parch', 'C', 'Q', 'S', 'female', 'male', '18-24', '25-34', '35-44', '45+', '<13','13-18', 'Family' ]<correct_missing_values>
X_train = train_data.drop('label',axis=1) X_train = X_train.values X_train = X_train.astype('float32') y_train = train_data['label'] y_train = y_train.values y_train = y_train.astype('float32') X_test = test_data.astype('float32') print("The MNIST dataset has a training set of %d examples." % len(X_train)) print("The MNIST database has a test set of %d examples." % len(X_test))
Digit Recognizer
17,303,075
train_df.fillna(0,inplace=True )<prepare_x_and_y>
X_train = X_train.astype('float32')/255 X_test = X_test.astype('float32')/255
Digit Recognizer
17,303,075
X=train_df[sel_cols]<prepare_x_and_y>
datagen_train = ImageDataGenerator( width_shift_range=0.2, height_shift_range=0.2, rotation_range = 20, horizontal_flip=True, vertical_flip = True, shear_range = 0.1) datagen_train.fit(X_train )
Digit Recognizer
17,303,075
y=train_df['Survived']<split>
random_seed = 46 X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.25, random_state=random_seed) print('Integer-valued labels:') print(y_train[:10]) y_train = np_utils.to_categorical(y_train, 10) y_val = np_utils.to_categorical(y_val, 10) print('One-hot labels:') print(y_train[:10] )
Digit Recognizer
17,303,075
train_X,val_X,train_y,val_y=train_test_split(X,y,test_size=0.3,random_state=1 )<train_model>
model = Sequential() model.add(Conv2D(32,(5, 5), padding='same', input_shape=X_train.shape[1:])) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(Conv2D(32,(5, 5), padding='same')) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.3)) model.add(Conv2D(64,(3, 3), padding='same')) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(Conv2D(64,(3, 3), padding='same')) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.3)) model.add(Flatten()) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(10)) model.add(Activation('softmax')) optimizer = optimizers.Adam(lr=0.001) model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) model.summary() print("input shape ",model.input_shape) print("output shape ",model.output_shape )
Digit Recognizer
17,303,075
lr=LogisticRegression(max_iter=400) lr.fit(train_X,train_y )<compute_test_metric>
batch_size = 128 nb_epoch = 20 checkpointer = [ ReduceLROnPlateau(monitor='val_loss', patience=3, verbose=1, factor=0.5, min_lr = 0.00001, cooldown=0), ModelCheckpoint('mnist_model_best.hdf5', monitor='val_accuracy', save_best_only=True, mode='max', verbose=1) ] hist = model.fit(X_train, y_train, epochs=nb_epoch, batch_size=batch_size, validation_data=(X_val,y_val), callbacks=[checkpointer],verbose=1, shuffle=True )
Digit Recognizer
17,303,075
lr.score(train_X,train_y )<compute_test_metric>
model = load_model('mnist_model_best.hdf5') score = model.evaluate(X_val, y_val, verbose=0) accuracy = 100*score[1] print('Validation accuracy: %.4f%%' % accuracy )
Digit Recognizer
17,303,075
lr.score(val_X,val_y )<load_from_csv>
predictions = model.predict_classes(X_test, verbose=1) submissions = pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)) , "Label": predictions}) submissions.to_csv("mnist_MLP_test.csv",index=False )
Digit Recognizer
17,159,331
test = pd.read_csv('../input/titanic/test.csv')<categorify>
import pandas as pd from matplotlib import pyplot import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from numpy import asarray, unique, argmax from tensorflow.keras import Sequential from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten, Dropout from keras.optimizers import RMSprop
Digit Recognizer
17,159,331
embark=pd.get_dummies(test['Embarked'] )<categorify>
data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') data.head()
Digit Recognizer
17,159,331
gender=pd.get_dummies(test['Sex'] )<data_type_conversions>
unique = data['label'].unique() print("Unique Numbers :",unique) n_classes = len(unique) print("Number of classes :",n_classes )
Digit Recognizer
17,159,331
test['Age'].fillna(30,inplace=True) test['Age']=test['Age'].astype('int' )<feature_engineering>
x = data.drop(labels = ["label"], axis=1) print("X= ",x )
Digit Recognizer
17,159,331
test['Age_grp']=test['Age'].apply(lambda x: age_grp(x))<categorify>
y = data['label'] print("Target Variable: ",y )
Digit Recognizer
17,159,331
age=pd.get_dummies(test['Age_grp'] )<concatenate>
x_train, x_test, y_train, y_test = train_test_split(x,y, test_size=0.20, random_state=42 )
Digit Recognizer
17,159,331
test_df=pd.concat([test,embark,gender,age],axis=1 )<feature_engineering>
print("X_train: ", x_train)
print("X_test: ", x_test)
print("Y_train: ", y_train)
print("Y_test: ", y_test)
Digit Recognizer
17,159,331
test_df['Family']=test_df['Parch']+1+test_df['SibSp'] test_df['Parch']=test_df['Parch'].apply(lambda x: is_var(x)) test_df['SibSp']=test_df['SibSp'].apply(lambda x: is_var(x))<correct_missing_values>
x_train = x_train.values.reshape(-1,28,28,1) x_test = x_test.values.reshape(-1,28,28,1 )
Digit Recognizer
17,159,331
test_df.fillna(0,inplace=True )<prepare_x_and_y>
x_train = x_train.astype('float32')/ 255.0 x_test = x_test.astype('float32')/ 255.0
Digit Recognizer
17,159,331
test_X=test_df[sel_cols]<predict_on_test>
model = Sequential() model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape = in_shape)) model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax"))
Digit Recognizer
17,159,331
test_y=lr.predict(test_X )<load_from_csv>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
Digit Recognizer
17,159,331
sub = pd.read_csv('../input/titanic/gender_submission.csv')<prepare_output>
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])  # note: compiled with Adam, so the RMSprop defined in the previous cell goes unused
Digit Recognizer
17,159,331
sub['Survived']=test_y<save_to_csv>
model.fit(x_train, y_train, validation_split=0.2, epochs=100, batch_size=128, verbose= 1)
Digit Recognizer
17,159,331
sub.to_csv('submission.csv',index=False )<install_modules>
loss, accuracy = model.evaluate(x_test, y_test, verbose=1) print('Accuracy: %.3f' %accuracy) print('Loss: ',loss )
Digit Recognizer
17,159,331
!pip install autogluon --user clear_output()<import_modules>
image = x_test[2] ypred = model.predict(asarray([image])) print('Prediction: Class =%d' %argmax(ypred))
Digit Recognizer
17,159,331
import numpy as np import pandas as pd import os import random from autogluon.tabular import TabularDataset, TabularPredictor<define_variables>
test_data = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') test_data.head()
Digit Recognizer
17,159,331
TRAIN_PATH = "../input/titanic/train.csv"
TEST_PATH = "../input/titanic/test.csv"
SAMPLE_SUBMISSION_PATH = "../input/titanic/gender_submission.csv"
SUBMISSION_PATH = "submission.csv"
TARGET = 'Survived'
EVAL_METRIC = "roc_auc"
SAVE_PATH = 'agModels-predictClass'
DEFAULT_RANDOM_SEED = 2021

def seedBasic(seed=DEFAULT_RANDOM_SEED):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

seedBasic()<create_dataframe>
test = test_data/255.0
Digit Recognizer
17,159,331
train = TabularDataset(TRAIN_PATH) test = TabularDataset(TEST_PATH )<train_model>
test_final_Data = test.values.reshape(-1,28,28,1 )
Digit Recognizer
17,159,331
predictor = TabularPredictor(label=TARGET,eval_metric=EVAL_METRIC, path=SAVE_PATH ).fit(train )<predict_on_test>
label = model.predict(test_final_Data) print(label )
Digit Recognizer
17,159,331
y_pred = predictor.predict(test )<save_to_csv>
label = np.argmax(label, axis=1)
Digit Recognizer
17,159,331
submission = pd.read_csv(SAMPLE_SUBMISSION_PATH)
submission[TARGET] = y_pred
submission.to_csv(SUBMISSION_PATH, index=False)
submission.head()<set_options>
sample_submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv') sample_submission.head()
Digit Recognizer
17,159,331
init_notebook_mode(connected=True) <set_options>
index = test_data.index.values + 1 data = {'ImageId' : index, "Label" : label} df = pd.DataFrame(data=data) df.head()
Digit Recognizer
17,159,331
warnings.filterwarnings('ignore' )<load_from_csv>
submit_file = pd.DataFrame({'ImageId' : index, "Label" : label.astype(int ).ravel() }) submit_file.to_csv("submission.csv",index = False)
Digit Recognizer
18,990,193
train_df=pd.read_csv("/kaggle/input/titanic/train.csv") test_df = pd.read_csv("/kaggle/input/titanic/test.csv" )<concatenate>
import tensorflow as tf import cv2 as cv import matplotlib.pyplot as plt from tensorflow.keras import models, layers
Digit Recognizer
18,990,193
train_len = len(train_df) combined = train_df.append(test_df,ignore_index=True) combined.fillna(np.nan )<count_missing_values>
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv')
Digit Recognizer
18,990,193
combined.isnull().sum()<groupby>
model = models.Sequential() model.add(layers.Conv2D(filters=32, kernel_size=(4, 4), strides=(1,1), padding = 'same', activation='relu', input_shape = data_shape)) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU(0.2)) model.add(layers.Conv2D(filters=32, kernel_size=(4, 4), strides=(1,1), padding = 'same', activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU(0.2)) model.add(layers.MaxPooling2D(( 2, 2))) model.add(layers.Dropout(0.35)) model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1,1), padding = 'same', activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU(0.2)) model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1,1), padding = 'same', activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU(0.2)) model.add(layers.MaxPooling2D(( 2, 2))) model.add(layers.Dropout(0.35)) model.add(layers.Conv2D(filters=64, kernel_size=(2, 2), strides=(1,1), padding = 'same', activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU(0.2)) model.add(layers.Conv2D(filters=64, kernel_size=(2, 2), strides=(1,1), padding = 'same', activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU(0.2)) model.add(layers.Dropout(0.35)) model.add(layers.Flatten()) model.add(layers.Dense(units=256, activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.Dropout(0.35)) model.add(layers.Dense(units=256, activation='relu')) model.add(layers.BatchNormalization()) model.add(layers.Dropout(0.35)) model.add(layers.Dense(units=10, activation='softmax')) model.summary()
Digit Recognizer
18,990,193
combined.groupby(['Pclass','Sex'])['Age'].mean()<feature_engineering>
Digit Recognizer
18,990,193
combined['AgeGroup'] = 'adult' combined.loc[combined['Name'].str.contains('Master'),'AgeGroup'] = "child" combined.loc[combined['Age'] <= 14.0,'AgeGroup'] = "child" combined.loc[(combined['Age'].isnull())&(combined['Name'].str.contains('Miss')) &(combined['Parch'] != 0),'AgeGroup'] = "child"<groupby>
Digit Recognizer