kernel_id (int64) | prompt (string) | completion (string) | comp_name (string) |
---|---|---|---|
10,493,342 | train_data.drop('WheelTypeID',inplace=True,axis=1)
test_data.drop('WheelTypeID',inplace=True,axis=1 )<count_values> | df_age = feat_to_binning("Age",5)
print(df_age['Age_bin_round'].value_counts())
df_age.head() | Summer Analytics 2020 Capstone Project |
10,493,342 | train_data['WheelType'].value_counts()<count_values> | X_train_pre['Age'] = df_age.loc[:1628,'Age_bin_round']
X_test_pre['Age'] = df_age.loc[1628:,'Age_bin_round']
| Summer Analytics 2020 Capstone Project |
10,493,342 | test_data['WheelType'].value_counts()<data_type_conversions> | df_bin = pd.DataFrame()
df_con = pd.DataFrame() | Summer Analytics 2020 Capstone Project |
10,493,342 | train_data['WheelType'].fillna(value='WheelType_unk',inplace=True)
test_data['WheelType'].fillna(value='WheelType_unk',inplace=True )<count_values> | bestfeatures = SelectKBest(score_func=chi2, k=21)
fit = bestfeatures.fit(X,y ) | Summer Analytics 2020 Capstone Project |
10,493,342 | train_data['WheelType'].value_counts()<count_values> | dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(X.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] | Summer Analytics 2020 Capstone Project |
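The completion above ranks features with a chi-squared test. A minimal, self-contained sketch of the same SelectKBest pattern on the iris toy dataset (the data and k are illustrative, not the competition's):

```python
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, chi2

# chi2 requires non-negative feature values; iris satisfies that.
X_demo, y_demo = load_iris(return_X_y=True, as_frame=True)
selector = SelectKBest(score_func=chi2, k=2)
selector.fit(X_demo, y_demo)
scores_demo = pd.DataFrame({'Specs': X_demo.columns, 'Score': selector.scores_})
print(scores_demo.nlargest(2, 'Score'))
```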
10,493,342 | train_data['Nationality'].value_counts()<count_values> | featureScores | Summer Analytics 2020 Capstone Project |
10,493,342 | test_data['Nationality'].value_counts()<data_type_conversions> | print(featureScores.nlargest(21,'Score')) | Summer Analytics 2020 Capstone Project |
10,493,342 | train_data['Nationality'].fillna(value='Nationality_unk',inplace=True)
test_data['Nationality'].fillna(value='Nationality_unk',inplace=True )<count_values> | bestfeatures_f = SelectKBest(score_func=f_classif, k=39)
fit_f = bestfeatures_f.fit(X, y) | Summer Analytics 2020 Capstone Project |
10,493,342 | train_data['Size'].value_counts()<count_values> | dfscores_f = pd.DataFrame(fit_f.scores_)
dfcolumns_f = pd.DataFrame(X.columns)
featureScores_f = pd.concat([dfcolumns_f,dfscores_f],axis=1)
featureScores_f.columns = ['Specs','Score'] | Summer Analytics 2020 Capstone Project |
10,493,342 | test_data['Size'].value_counts()<data_type_conversions> | X_imp = X_train_pre
y_imp = y_train
model = xgboost.XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=1.0, gamma=0.5, gpu_id=-1,
importance_type='gain', interaction_constraints='',
learning_rate=0.03, max_delta_step=0, max_depth=100,
min_child_weight=1, missing=None, monotone_constraints='()',
n_estimators=500, n_jobs=0, num_parallel_tree=1,
objective='binary:logistic', random_state=0, reg_alpha=0,
reg_lambda=1, scale_pos_weight=1, subsample=0.8,
tree_method='exact', validate_parameters=1, verbosity=None)
model.fit(X_imp,y_imp ) | Summer Analytics 2020 Capstone Project |
10,493,342 | train_data['Size'].fillna(value='Size_unk',inplace=True)
test_data['Size'].fillna(value="Size_unk",inplace=True )<count_values> | X_train = X_train_pre.drop(['Id','Behaviour','PerformanceRating', 'PercentSalaryHike',
'YearsAtCompany','TotalWorkingYears', 'EmployeeNumber',
'YearsSinceLastPromotion'], axis=1)
X_test = X_test_pre.drop(['Id','Behaviour','PerformanceRating', 'PercentSalaryHike',
'YearsAtCompany','TotalWorkingYears', 'EmployeeNumber',
'YearsSinceLastPromotion'], axis=1)
print(X_train.shape)
print(X_test.shape ) | Summer Analytics 2020 Capstone Project |
10,493,342 | train_data['TopThreeAmericanName'].value_counts()<count_values> | X_train_pre_cat = df_combine.iloc[:1628,:]
X_train_pre_cat = X_train_pre_cat.drop(['Attrition'], axis =1)
X_test_pre_cat = df_combine.iloc[1628:,:]
X_test_pre_cat = X_test_pre_cat.drop(['Attrition'], axis =1)
X_train_pre_cat['Age'] = df_age.loc[:1628,'Age_bin_round']
X_test_pre_cat['Age'] = df_age.loc[1628:,'Age_bin_round']
print(X_train_pre_cat.shape)
print(X_test_pre_cat.shape ) | Summer Analytics 2020 Capstone Project |
10,493,342 | test_data['TopThreeAmericanName'].value_counts()<data_type_conversions> | X_train_cat = X_train_pre_cat.drop(['Id','Behaviour','PerformanceRating', 'PercentSalaryHike',
'YearsAtCompany','TotalWorkingYears', 'EmployeeNumber',
'YearsSinceLastPromotion'], axis=1)
X_test_cat = X_test_pre_cat.drop(['Id','Behaviour','PerformanceRating', 'PercentSalaryHike',
'YearsAtCompany','TotalWorkingYears', 'EmployeeNumber',
'YearsSinceLastPromotion'], axis=1)
print(X_train_cat.shape)
print(X_test_cat.shape)
| Summer Analytics 2020 Capstone Project |
10,493,342 | train_data['TopThreeAmericanName'].fillna(value='Top_unk',inplace=True)
test_data['TopThreeAmericanName'].fillna(value='Top_unk',inplace=True )<count_values> | all_catg_col = ['Age'] + ordinal_catg_col + nominal_catg_col
indices_cat = []
for col in all_catg_col:
if(col in list(X_train_cat.columns)) :
indices_cat.append(X_train_cat.columns.get_loc(col))
indices_cat.sort()
print(indices_cat)
train_pool_cat = Pool(X_train_cat,
y_train,
indices_cat ) | Summer Analytics 2020 Capstone Project |
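In the Pool construction above, the third positional argument is cat_features, the column indices CatBoost should treat as categorical. A hedged, self-contained sketch with an illustrative toy frame:

```python
import pandas as pd
from catboost import Pool

X_demo = pd.DataFrame({'Age': [2, 4, 1, 3], 'Dept': ['Sales', 'HR', 'Sales', 'IT']})
y_demo = [0, 1, 0, 1]
cat_indices = [X_demo.columns.get_loc('Dept')]  # positional index of the categorical column
pool = Pool(X_demo, y_demo, cat_features=cat_indices)
print(pool.num_row(), pool.num_col())  # 4 2
```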
10,493,342 | train_data['PRIMEUNIT'].value_counts()<count_values> | model_cat = CatBoostClassifier(random_state = 51,eval_metric = 'AUC')
random_grid_cat = {'learning_rate': [0.05, 0.08, 0.1, 0.15, 0.2, 0.3],
'depth': [4, 6,10,15,20,30,40,50,60,70],
'l2_leaf_reg': [1, 3, 5, 7, 9]}
randomized_search_cat = model_cat.randomized_search(random_grid_cat, train_pool_cat, cv=5,plot=True)
| Summer Analytics 2020 Capstone Project |
10,493,342 | test_data['PRIMEUNIT'].value_counts()<data_type_conversions> | randomized_search_cat | Summer Analytics 2020 Capstone Project |
10,493,342 | train_data['PRIMEUNIT'].fillna(value="Prime_unk",inplace=True)
test_data['PRIMEUNIT'].fillna(value="Prime_unk",inplace=True )<categorify> |
model_train = CatBoostClassifier(iterations=170, learning_rate=0.1, random_state = 51,eval_metric = 'AUC',loss_function ='CrossEntropy')
model_train.fit(train_pool_cat)
acc_catboost = round(model_train.score(X_train_cat, y_train)* 100, 2 ) | Summer Analytics 2020 Capstone Project |
10,493,342 | train_data['AUCGUART'].replace("AGREEN","GREEN",inplace=True)
test_data['AUCGUART'].replace("ARED","RED",inplace=True )<data_type_conversions> | cv_params = model_train.get_params()
cv_data = cv(train_pool_cat,
cv_params,
fold_count=5,
plot=True)
acc_cv_catboost = round(np.max(cv_data['test-AUC-mean'])* 100, 2)
| Summer Analytics 2020 Capstone Project |
10,493,342 | train_data['AUCGUART'].fillna(value="AUC_unk",inplace=True)
test_data['AUCGUART'].fillna(value="AUC_unk",inplace=True )<drop_column> | print('train accuracy: ' , acc_catboost)
print("CV Accuracy: " ,acc_cv_catboost)
cv_data.head() | Summer Analytics 2020 Capstone Project |
10,493,342 | train_data.drop(['MMRAcquisitionAuctionAveragePrice','MMRAcquisitionAuctionCleanPrice',
'MMRAcquisitionRetailAveragePrice','MMRAcquisitonRetailCleanPrice',
'MMRCurrentAuctionAveragePrice','MMRCurrentAuctionCleanPrice',
'MMRCurrentRetailAveragePrice','MMRCurrentRetailCleanPrice'],
inplace=True,axis=1)
test_data.drop(['MMRAcquisitionAuctionAveragePrice','MMRAcquisitionAuctionCleanPrice',
'MMRAcquisitionRetailAveragePrice','MMRAcquisitonRetailCleanPrice',
'MMRCurrentAuctionAveragePrice','MMRCurrentAuctionCleanPrice',
'MMRCurrentRetailAveragePrice','MMRCurrentRetailCleanPrice'],
inplace=True,axis=1 )<drop_column> | y_pred=model_train.predict_proba(X_test_cat)
pred=pd.DataFrame(y_pred[:,1])
sub_df=pd.read_csv('../input/summeranalytics2020/Sample_submission.csv')
datasets=pd.concat([sub_df['Id'],pred],axis=1)
datasets.columns=['Id','Attrition']
datasets.to_csv('Catboost_submission_temp.csv',index=False ) | Summer Analytics 2020 Capstone Project |
10,493,342 | train_data.drop('PurchDate',axis=1,inplace=True)
test_data.drop("PurchDate",axis=1,inplace=True )<drop_column> | xbg_classifier = xgboost.XGBClassifier(scoring = 'roc_auc', random_state = 51)
booster=['gbtree']
hyperparameter_grid = {
'n_estimators' : [100, 500, 900, 1100, 1500],
'min_child_weight': [1, 2, 3, 5, 7, 9],
'gamma': [0.5, 1, 1.5, 2, 5],
'subsample': [0.6, 0.8, 1.0],
'colsample_bytree': [0.6, 0.8, 1.0],
'max_depth': [5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120],
'learning_rate' : [0.005,0.01,0.03, 0.05, 0.15,0.3, 0.45, 0.55]
}
folds = 5
skf = StratifiedKFold(n_splits=folds, shuffle = True, random_state = 51)
random_cv = RandomizedSearchCV(estimator=xgb_classifier,
param_distributions=hyperparameter_grid,
cv=skf.split(X_train,y_train), n_iter=50,
scoring = 'roc_auc',n_jobs = 4,
verbose = 5,
return_train_score = True ) | Summer Analytics 2020 Capstone Project |
10,493,342 | train_data.drop(['RefId','IsBadBuy'],axis=1 ).dtypes!='object'<drop_column> | random_cv.fit(X_train, y_train ) | Summer Analytics 2020 Capstone Project |
10,493,342 | not_categorical=train_data.drop(['RefId','IsBadBuy'],axis=1 ).columns[train_data.drop(['RefId','IsBadBuy'],axis=1 ).dtypes!='object']<feature_engineering> | classifier_t = random_cv.best_estimator_
print(classifier_t)
classifier_t.fit(X_train,y_train ) | Summer Analytics 2020 Capstone Project |
10,493,342 | for i in not_categorical:
maximum=np.max(train_data[i])
train_data[i]=train_data[i]/maximum
maximum_test=np.max(test_data[i])
test_data[i]=test_data[i]/maximum_test<drop_column> | acc_xgb = round(classifier_t.score(X_train, y_train)* 100, 2)
train_pred = model_selection.cross_val_predict(classifier_t,
X_train,
y_train,
cv=5,
n_jobs = -1)
acc_cv = round(metrics.accuracy_score(y_train, train_pred)* 100, 2)
print(acc_xgb)
print(acc_cv ) | Summer Analytics 2020 Capstone Project |
10,493,342 | categorical=train_data.drop(['RefId','IsBadBuy'],axis=1 ).columns[train_data.drop(['RefId','IsBadBuy'],axis=1 ).dtypes=='object']<filter> | y_pred=classifier_t.predict_proba(X_test)
pred=pd.DataFrame(y_pred[:,1])
sub_df=pd.read_csv('../input/summeranalytics2020/Sample_submission.csv')
datasets=pd.concat([sub_df['Id'],pred],axis=1)
datasets.columns=['Id','Attrition']
datasets.to_csv('XBGC_random_submission_temp.csv',index=False ) | Summer Analytics 2020 Capstone Project |
5,023,135 | train_data[categorical[0]]<categorify> | train = pd.read_csv('.. /input/train.csv',index_col = 'Id')
test = pd.read_csv('.. /input/test.csv',index_col = 'Id' ) | I-RICH ML COMPETITION |
5,023,135 | pd.get_dummies(train_data[categorical[0]] )<categorify> | train.price= np.log1p(train["price"] ) | I-RICH ML COMPETITION |
5,023,135 | for i in categorical:
dummies=pd.get_dummies(train_data[i])
dummies.columns=str(i)+'_'+dummies.columns
train_data=pd.concat([train_data,dummies],axis=1)
train_data.drop(i,inplace=True,axis=1)
dummies=pd.get_dummies(test_data[i])
dummies.columns=str(i)+'_'+dummies.columns
test_data=pd.concat([test_data,dummies],axis=1)
test_data.drop(i,inplace=True,axis=1 )<feature_engineering> | train = train[~(( train.bathrooms>6)&(train.price<14)) ] | I-RICH ML COMPETITION |
5,023,135 | for i in train_data.drop('IsBadBuy',axis=1 ).columns:
if i not in test_data.columns:
test_data[i]=np.zeros(len(test_data))<feature_engineering> | ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.price.values
all_data = pd.concat(( train, test))
all_data.drop(['price'], axis=1, inplace=True)
all_data.shape
| I-RICH ML COMPETITION |
5,023,135 | for i in test_data.columns:
if i not in train_data.columns:
train_data[i]=np.zeros(len(train_data))<drop_column> | all_data_na =(all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index ).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
missing_data.head(20 ) | I-RICH ML COMPETITION |
5,023,135 | test_data=test_data[train_data.drop("IsBadBuy",axis=1 ).columns]<prepare_x_and_y> | all_data['zipcode'] = all_data['zipcode'].apply(str ) | I-RICH ML COMPETITION |
5,023,135 | X=train_data.drop(['RefId','IsBadBuy'],axis=1)
y=train_data['IsBadBuy']<split> | all_data['YrSold'] =all_data['date'].apply(lambda s:int(s[:4]))
all_data['MonthSold'] =all_data['date'].apply(lambda s:(s[4:6]))
all_data['DaySold'] =all_data['date'].apply(lambda s:int(s[6:8]))
all_data.drop(['date'], axis=1, inplace=True)
all_data['rooms'] = all_data['bedrooms']+all_data['bathrooms']
all_data.head() | I-RICH ML COMPETITION |
5,023,135 | X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=42 )<import_modules> | numerical_cols = [cname for cname in all_data.columns if
all_data[cname].dtype in ['int64', 'float64']]
skewness = all_data[numerical_cols].apply(lambda x: skew(x))
skewness = skewness[abs(skewness)> 0.9]
print(str(skewness.shape[0])+ " skewed numerical features to log transform")
skewed_features = list(skewness.index)
all_data[skewed_features] = np.log1p(all_data[skewed_features] ) | I-RICH ML COMPETITION |
5,023,135 | from sklearn.neighbors import KNeighborsClassifier<import_modules> | Dummies_all_data = pd.get_dummies(all_data ) | I-RICH ML COMPETITION |
5,023,135 | from sklearn.neighbors import KNeighborsClassifier<import_modules> | X_train = Dummies_all_data[:ntrain]
X_test = Dummies_all_data[ntrain:] | I-RICH ML COMPETITION |
5,023,135 | from sklearn.neighbors import KNeighborsClassifier<train_model> | n_folds=5
def rmsle_cv(model):
rmsle = np.sqrt(-cross_val_score(model, X_train, y_train, scoring="neg_mean_squared_error", cv=n_folds))
print("\nscore rmsle: {:.4f}({:.4f})\n".format(rmsle.mean(), rmsle.std()))
return rmsle
| I-RICH ML COMPETITION |
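Since y_train in this kernel is log1p(price), the cross-validated RMSE that rmsle_cv reports is the RMSLE of the original-scale prices. A self-contained sketch of that equivalence on synthetic data (Ridge and the data shapes are illustrative):

```python
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score

X_demo, y_raw = make_regression(n_samples=200, n_features=5, noise=10.0, random_state=0)
y_demo = np.log1p(y_raw - y_raw.min() + 1)  # shift positive, then log1p, as done with price
rmse = np.sqrt(-cross_val_score(Ridge(), X_demo, y_demo,
                                scoring="neg_mean_squared_error", cv=5))
print("score rmsle: {:.4f}({:.4f})".format(rmse.mean(), rmse.std()))
```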
5,023,135 | KNN=KNeighborsClassifier(n_neighbors=11)
KNN.fit(X_train,y_train )<compute_test_metric> | model_xgb = XGBRegressor(colsample_bytree=1, gamma=0.0468,
learning_rate=0.053, max_depth=3,
min_child_weight=1.7817, n_estimators=2200,
reg_alpha=0.4640, reg_lambda=0.8571,
subsample=0.5213, silent=1,
random_state =7, nthread = -1)
score_xgb = rmsle_cv(model_xgb)
| I-RICH ML COMPETITION |
5,023,135 | KNN.score(X_test,y_test )<predict_on_test> | model_LGB = lgb.LGBMRegressor(objective='regression',num_leaves=5,
learning_rate=0.05, n_estimators=720,
max_bin = 55, bagging_fraction = 0.8,
bagging_freq = 5, feature_fraction = 0.2319,
feature_fraction_seed=9, bagging_seed=9,
min_data_in_leaf =6, min_sum_hessian_in_leaf = 11)
score_LGB = rmsle_cv(model_LGB ) | I-RICH ML COMPETITION |
5,023,135 | predict=KNN.predict(test_data.drop('RefId',axis=1))<prepare_output> | model_GBoost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,
max_depth=4, max_features='sqrt',
min_samples_leaf=15, min_samples_split=10,
loss='huber', random_state =5)
score_GBoost = rmsle_cv(model_GBoost ) | I-RICH ML COMPETITION |
5,023,135 | Submission=pd.DataFrame(data=predict,columns=['IsBadBuy'])
Submission.head()<prepare_output> | ENet = make_pipeline(RobustScaler() , ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3))
score_ENet = rmsle_cv(ENet ) | I-RICH ML COMPETITION |
5,023,135 | Submission['RefId']=test_data['RefId']
Submission.set_index('RefId',inplace=True )<save_to_csv> | lasso = make_pipeline(RobustScaler() , Lasso(alpha =0.0005, random_state=1))
score_lasso = rmsle_cv(lasso ) | I-RICH ML COMPETITION |
5,023,135 | Submission.head()
Submission.to_csv('Submission.csv' )<import_modules> | model_GBoost.fit(X_train,y_train)
model_xgb.fit(X_train,y_train)
model_LGB.fit(X_train,y_train)
lasso.fit(X_train,y_train)
ENet.fit(X_train,y_train)
lasso.fit(X_train,y_train)
preds_test = np.expm1(model_xgb.predict(X_test)*0.50 +(
model_LGB.predict(X_test)*0.10+ model_GBoost.predict(X_test)*0.30)+(
lasso.predict(X_test)*0.05+ENet.predict(X_test)*0.05))
output = pd.DataFrame({'Id': X_test.index,
'price': preds_test})
output.to_csv('submission.csv', index=False ) | I-RICH ML COMPETITION |
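The blend above is computed in log space and inverted with np.expm1 because the target was log1p-transformed earlier in the kernel. A minimal sketch of the inversion; the weights (which sum to 1) and values are illustrative:

```python
import numpy as np

log_preds = {'xgb': 11.9, 'lgb': 12.0, 'gbr': 11.8, 'lasso': 12.1, 'enet': 12.05}
weights = {'xgb': 0.50, 'lgb': 0.10, 'gbr': 0.30, 'lasso': 0.05, 'enet': 0.05}
blended_log = sum(w * log_preds[k] for k, w in weights.items())
price = np.expm1(blended_log)  # undo log1p to return to the original price scale
print(round(price, 2))
```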
10,604,656 | filename = ".. /input/submission/submission.csv"
df = pd.read_csv(filename)
<load_from_csv> | df_order = pd.read_csv('.. /input/open-shopee-code-league-logistic/delivery_orders_march.csv' ) | [Open] Shopee Code League - Logistics |
10,604,656 | series = read_csv('../input/submission/submission.csv', header=0, index_col=0, parse_dates=True, squeeze=True)
series.plot()
pyplot.show()
<load_from_csv> | df_SLA = pd.read_excel('.. /input/open-shopee-code-league-logistic/SLA_matrix.xlsx' ) | [Open] Shopee Code League - Logistics |
10,604,656 | series = read_csv('../input/submission/submission.csv', header=0, index_col=0, parse_dates=True, squeeze=True)
lag_plot(series )<save_to_csv> | SLA_Matrix = [[3, 5, 7, 7], [5, 5, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]] | [Open] Shopee Code League - Logistics |
10,604,656 | Submission.to_csv('submission.csv',index=False )<compute_test_metric> | time_needed = []
for(o, d)in zip(origin, destination):
time_needed.append(SLA_Matrix[o-1][d-1])
print(time_needed.count(3))
print(time_needed.count(5))
print(time_needed.count(7)) | [Open] Shopee Code League - Logistics |
10,604,656 | def RMSLE(pred,actual):
return np.sqrt(np.mean(np.power(( np.log(pred+1)-np.log(actual+1)) ,2)) )<load_from_csv> | def converttodate(epoch_time):
return datetime.datetime.fromtimestamp(epoch_time ).strftime("%Y-%m-%d" ) | [Open] Shopee Code League - Logistics |
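converttodate assumes the delivery timestamps are Unix epoch seconds; note that fromtimestamp applies the machine's local timezone, so the resulting date can shift by a day depending on where the kernel runs. A quick sketch with an illustrative epoch value:

```python
import datetime

epoch = 1583137548  # illustrative epoch-seconds value, not taken from the data
print(datetime.datetime.fromtimestamp(epoch).strftime("%Y-%m-%d"))
```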
10,604,656 | pd.set_option('mode.chained_assignment', None)
test = pd.read_csv(".. /input/covid19-global-forecasting-week-2/test.csv")
train = pd.read_csv(".. /input/covid19-global-forecasting-week-2/train.csv")
train['Province_State'].fillna('', inplace=True)
test['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region','Province_State','Date'])
test = test.sort_values(['Country_Region','Province_State','Date'] )<feature_engineering> | pick_date = df_order.pick.tolist()
first_attempt = df_order['1st_deliver_attempt'].tolist()
second_attempt = df_order['2nd_deliver_attempt'].tolist()
pick = []
first = []
second = []
for(i, j, k)in zip(pick_date, first_attempt, second_attempt):
pick.append(converttodate(i))
first.append(converttodate(j))
if np.isnan(k):
second.append(np.nan)
else :
second.append(converttodate(k)) | [Open] Shopee Code League - Logistics |
10,604,656 | feature_day = [1,20,50,100,200,500,1000]
def CreateInput(data):
feature = []
for day in feature_day:
data.loc[:,'Number day from ' + str(day)+ ' case'] = 0
if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0):
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max()
else:
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min()
for i in range(0, len(data)) :
if(data['Date'].iloc[i] > fromday):
day_delta = data['Date'].iloc[i] - fromday
data['Number day from ' + str(day)+ ' case'].iloc[i] = day_delta.days
feature = feature + ['Number day from ' + str(day)+ ' case']
return data[feature]
pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique() :
for province in train[(train['Country_Region'] == country)]['Province_State'].unique() :
df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]
df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
X_train = CreateInput(df_train)
y_train_confirmed = df_train['ConfirmedCases'].ravel()
y_train_fatalities = df_train['Fatalities'].ravel()
X_pred = CreateInput(df_test)
for day in sorted(feature_day,reverse = True):
feature_use = 'Number day from ' + str(day)+ ' case'
idx = X_train[X_train[feature_use] == 0].shape[0]
if(X_train[X_train[feature_use] > 0].shape[0] >= 20):
break
adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
adjusted_y_train_confirmed = y_train_confirmed[idx:]
adjusted_y_train_fatalities = y_train_fatalities[idx:]
idx = X_pred[X_pred[feature_use] == 0].shape[0]
adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
model = make_pipeline(PolynomialFeatures(2), BayesianRidge())
model.fit(adjusted_X_train,adjusted_y_train_confirmed)
y_hat_confirmed = model.predict(adjusted_X_pred)
model.fit(adjusted_X_train,adjusted_y_train_fatalities)
y_hat_fatalities = model.predict(adjusted_X_pred)
pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
pred_data['ConfirmedCases_hat'] = np.concatenate(( np.repeat(0, len(pred_data)- len(y_hat_confirmed)) , y_hat_confirmed), axis = 0)
pred_data['Fatalities_hat'] = np.concatenate(( np.repeat(float(0), len(pred_data)- len(y_hat_fatalities)) , y_hat_fatalities), axis = 0)
pred_data_all = pred_data_all.append(pred_data)
df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0
df_val_1 = df_val.copy()<compute_test_metric> | df_solution = pd.DataFrame() ;
df_solution['orderid'] = df_order.orderid
df_solution['origin'] = origin
df_solution['destination'] = destination
df_solution['time_needed'] = time_needed
df_solution['pick'] = pick
df_solution['first'] = first
df_solution['second'] = second
| [Open] Shopee Code League - Logistics |
10,604,656 | RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values,df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values )<compute_test_metric> | is_late = []
weekmask = [1, 1, 1, 1, 1, 1, 0]
for(i, j, k, l)in zip(df_solution['pick'].tolist() , df_solution['first'].tolist() , df_solution['second'].tolist() ,df_solution['time_needed']):
if(abs(np.busday_count(j, i, weekmask=weekmask)) - 1 > l):
is_late.append(1)
else :
is_late.append(0 ) | [Open] Shopee Code League - Logistics |
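np.busday_count counts days over the half-open interval [begin, end) under the given weekmask (Monday-Saturday here, since the SLA treats Sunday as non-working). A self-contained sketch of its semantics:

```python
import numpy as np

weekmask = [1, 1, 1, 1, 1, 1, 0]  # Sunday excluded, matching the cell above
# Pickup on Monday 2020-03-02, first attempt on Friday 2020-03-06:
print(np.busday_count('2020-03-02', '2020-03-06', weekmask=weekmask))  # 4
```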
10,604,656 | RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values,df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values )<groupby> | df_submit = pd.DataFrame() ;
df_submit['orderid'] = df_solution['orderid']
df_submit['is_late'] = is_late
| [Open] Shopee Code League - Logistics |
10,604,656 | <feature_engineering><EOS> | df_submit.to_csv('solution_noholiday.csv',index=False ) | [Open] Shopee Code League - Logistics |
8,861,736 | <SOS> metric: auc Kaggle data source: car-loan-default<feature_engineering> | import pandas as pd
import numpy as np
from datetime import timedelta, date
from sklearn.model_selection import train_test_split, GridSearchCV, KFold
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder
from lightgbm import LGBMClassifier | Car loan default |
8,861,736 | feature_day = [1,20,50,100,200,500,1000]
def CreateInput(data):
feature = []
for day in feature_day:
data.loc[:,'Number day from ' + str(day)+ ' case'] = 0
if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0):
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max()
else:
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min()
for i in range(0, len(data)) :
if(data['Date'].iloc[i] > fromday):
day_delta = data['Date'].iloc[i] - fromday
data['Number day from ' + str(day)+ ' case'].iloc[i] = day_delta.days
feature = feature + ['Number day from ' + str(day)+ ' case']
return data[feature]
pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique() :
for province in train[(train['Country_Region'] == country)]['Province_State'].unique() :
df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]
df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
X_train = CreateInput(df_train)
y_train_confirmed = df_train['ConfirmedCases'].ravel()
y_train_fatalities = df_train['Fatalities'].ravel()
X_pred = CreateInput(df_test)
for day in sorted(feature_day,reverse = True):
feature_use = 'Number day from ' + str(day)+ ' case'
idx = X_train[X_train[feature_use] == 0].shape[0]
if(X_train[X_train[feature_use] > 0].shape[0] >= 20):
break
adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
adjusted_y_train_confirmed = y_train_confirmed[idx:]
adjusted_y_train_fatalities = y_train_fatalities[idx:]
idx = X_pred[X_pred[feature_use] == 0].shape[0]
adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max()
min_test_date = pred_data['Date'].min()
model = SARIMAX(adjusted_y_train_confirmed, order=(1,1,0),
measurement_error=True ).fit(disp=False)
y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values
y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0)
model = SARIMAX(adjusted_y_train_fatalities, order=(1,1,0),
measurement_error=True ).fit(disp=False)
y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values
y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0)
pred_data['ConfirmedCases_hat'] = y_hat_confirmed
pred_data['Fatalities_hat'] = y_hat_fatalities
pred_data_all = pred_data_all.append(pred_data)
df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0
df_val_3 = df_val.copy()<compute_test_metric> | warnings.filterwarnings('ignore' ) | Car loan default |
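A minimal, self-contained sketch of the SARIMAX(1,1,0) fit-and-forecast step used in the prompt above; the cumulative-count series is illustrative, while order=(1,1,0) and measurement_error=True match the cell:

```python
import numpy as np
from statsmodels.tsa.statespace.sarimax import SARIMAX

y_demo = np.cumsum(np.random.default_rng(0).poisson(5, size=60)).astype(float)
fitted = SARIMAX(y_demo, order=(1, 1, 0), measurement_error=True).fit(disp=False)
print(fitted.forecast(7))  # forecasts for the next 7 steps
```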
8,861,736 | method_list = ['Poly Bayesian Ridge','Exponential Smoothing','SARIMA']
method_val = [df_val_1,df_val_2,df_val_3]
for i in range(0,3):
df_val = method_val[i]
method_score = [method_list[i]] + [RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values,df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values)] + [RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values,df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values)]
print(method_score )<save_to_csv> | train = pd.read_csv('/kaggle/input/car-loan/car_loan_train.csv')
test = pd.read_csv('/kaggle/input/car-loan/car_loan_test.csv')
train.columns = ["".join(c if c.isalnum() else "_" for c in str(x)) for x in train.columns]
test.columns = ["".join(c if c.isalnum() else "_" for c in str(x)) for x in test.columns] | Car loan default |
8,861,736 | df_val = df_val_3
submission = df_val[['ForecastId','ConfirmedCases_hat','Fatalities_hat']]
submission.columns = ['ForecastId','ConfirmedCases','Fatalities']
submission.to_csv('submission.csv', index=False)
submission<filter> | train = train.fillna('other')
test = test.fillna('other' ) | Car loan default |
8,861,736 | df_worldinfor[df_worldinfor['Country'] == 'Vietnam']<set_options> | train = train.replace({'PERFORM_CNS_SCORE_DESCRIPTION':{'C-Very Low Risk':'Low', 'A-Very Low Risk':'Low',
'B-Very Low Risk':'Low', 'D-Very Low Risk':'Low',
'F-Low Risk':'Low', 'E-Low Risk':'Low', 'G-Low Risk':'Low',
'H-Medium Risk': 'Medium', 'I-Medium Risk': 'Medium',
'J-High Risk':'High', 'K-High Risk':'High','L-Very High Risk':'High',
'M-Very High Risk':'High','Not Scored: More than 50 active Accounts found':'Not Scored',
'Not Scored: Only a Guarantor':'Not Scored','Not Scored: Not Enough Info available on the customer':'Not Scored',
'Not Scored: No Activity seen on the customer(Inactive)':'Not Scored','Not Scored: No Updates available in last 36 months':'Not Scored',
'Not Scored: Sufficient History Not Available':'Not Scored', 'No Bureau History Available':'Not Scored'
}})
test = test.replace({'PERFORM_CNS_SCORE_DESCRIPTION':{'C-Very Low Risk':'Low', 'A-Very Low Risk':'Low',
'B-Very Low Risk':'Low', 'D-Very Low Risk':'Low',
'F-Low Risk':'Low', 'E-Low Risk':'Low', 'G-Low Risk':'Low',
'H-Medium Risk': 'Medium', 'I-Medium Risk': 'Medium',
'J-High Risk':'High', 'K-High Risk':'High','L-Very High Risk':'High',
'M-Very High Risk':'High','Not Scored: More than 50 active Accounts found':'Not Scored',
'Not Scored: Only a Guarantor':'Not Scored','Not Scored: Not Enough Info available on the customer':'Not Scored',
'Not Scored: No Activity seen on the customer(Inactive)':'Not Scored','Not Scored: No Updates available in last 36 months':'Not Scored',
'Not Scored: Sufficient History Not Available':'Not Scored', 'No Bureau History Available':'Not Scored'
}} ) | Car loan default |
8,861,736 | plt.style.use('ggplot')
font = {'family' : 'meiryo'}
plt.rc('font', **font )<load_from_csv> | train['Date_of_Birth'] = pd.to_datetime(train['Date_of_Birth'])
train['DisbursalDate'] = pd.to_datetime(train['DisbursalDate'])
test['Date_of_Birth'] = pd.to_datetime(test['Date_of_Birth'])
test['DisbursalDate'] = pd.to_datetime(test['DisbursalDate'])
now = pd.Timestamp('now')
future = train['Date_of_Birth'] > date(year=2050,month=1,day=1)
train.loc[future, 'Date_of_Birth'] -= timedelta(days=365.25*100)
future = test['Date_of_Birth'] > date(year=2050,month=1,day=1)
test.loc[future, 'Date_of_Birth'] -= timedelta(days=365.25*100)
train['birth_year'] = train['Date_of_Birth'].apply(lambda ts: ts.year)
train['birth_month'] = train['Date_of_Birth'].apply(lambda ts: ts.month)
train['birth_day'] = train['Date_of_Birth'].apply(lambda ts: ts.day)
train['birth_dayofweek'] = train['Date_of_Birth'].apply(lambda ts: ts.dayofweek)
train['Disbursal_month'] = train['DisbursalDate'].apply(lambda ts: ts.month)
train['Disbursal_day'] = train['DisbursalDate'].apply(lambda ts: ts.day)
train['Disbursal_dayofweek'] = train['DisbursalDate'].apply(lambda ts: ts.dayofweek)
train['Age'] =(now - train['Date_of_Birth'] ).dt.days
train['DaysSinceDisbursal'] =(now - train['DisbursalDate'] ).dt.days
test['birth_year'] = test['Date_of_Birth'].apply(lambda ts: ts.year)
test['birth_month'] = test['Date_of_Birth'].apply(lambda ts: ts.month)
test['birth_day'] = test['Date_of_Birth'].apply(lambda ts: ts.day)
test['birth_dayofweek'] = test['Date_of_Birth'].apply(lambda ts: ts.dayofweek)
test['Disbursal_month'] = test['DisbursalDate'].apply(lambda ts: ts.month)
test['Disbursal_day'] = test['DisbursalDate'].apply(lambda ts: ts.day)
test['Disbursal_dayofweek'] = test['DisbursalDate'].apply(lambda ts: ts.dayofweek)
test['Age'] =(now - test['Date_of_Birth'] ).dt.days
test['DaysSinceDisbursal'] =(now - test['DisbursalDate'] ).dt.days
train = train.drop(['Date_of_Birth', 'DisbursalDate'], axis=1)
test = test.drop(['Date_of_Birth', 'DisbursalDate'], axis=1 ) | Car loan default |
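The Date_of_Birth rollback above guards against two-digit years parsing into the wrong century (e.g. '68' becoming 2068); 365.25 * 100 days equals the 36,525 days of a 100-year span containing 25 leap days, so the subtraction lands exactly 100 years back. A self-contained sketch:

```python
import pandas as pd
from datetime import date, timedelta

dob = pd.to_datetime(pd.Series(['1984-01-01', '2068-01-01']))
future = dob > pd.Timestamp(date(2050, 1, 1))
dob.loc[future] -= timedelta(days=365.25 * 100)
print(dob.dt.year.tolist())  # [1984, 1968]
```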
8,861,736 | train_df = pd.read_csv(".. /input/covid19-global-forecasting-week-2/train.csv" )<correct_missing_values> | def get_nmbr(text):
return int(text[0:text.find('y')])* 12 + int(text[text.find(' ')+1:text.find('m')])
train['AVERAGE_ACCT_AGE'] = train['AVERAGE_ACCT_AGE'].apply(get_nmbr)
train['CREDIT_HISTORY_LENGTH'] = train['CREDIT_HISTORY_LENGTH'].apply(get_nmbr)
test['AVERAGE_ACCT_AGE'] = test['AVERAGE_ACCT_AGE'].apply(get_nmbr)
test['CREDIT_HISTORY_LENGTH'] = test['CREDIT_HISTORY_LENGTH'].apply(get_nmbr ) | Car loan default |
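get_nmbr folds the 'Xyrs Ymon' strings used by AVERAGE_ACCT_AGE and CREDIT_HISTORY_LENGTH into a total month count. A worked sketch on an illustrative value:

```python
def get_nmbr(text):
    years = int(text[0:text.find('y')])                     # digits before 'yrs'
    months = int(text[text.find(' ') + 1:text.find('m')])   # digits before 'mon'
    return years * 12 + months

print(get_nmbr('1yrs 11mon'))  # 23
```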
8,861,736 | train_df = train_df[train_df["Date"] < "2020-03-19"]
train_df = train_df.fillna("No State" )<normalization> | categ_cols = ['branch_id', 'manufacturer_id', 'Employment_Type', 'State_ID', 'PERFORM_CNS_SCORE_DESCRIPTION',
'Aadhar_flag', 'PAN_flag', 'VoterID_flag', 'Driving_flag', 'Passport_flag'] | Car loan default |
8,861,736 | test_rate = 0.05
maxlen = 20
train_date_count = len(set(train_df["Date"]))
X, Y = [],[]
scaler = StandardScaler()
train_df["ConfirmedCases_std"] = scaler.fit_transform(train_df["ConfirmedCases"].values.reshape(len(train_df["ConfirmedCases"].values),1))
for state,country in train_df.groupby(["Province_State","Country_Region"] ).sum().index:
df = train_df[(train_df["Country_Region"] == country)&(train_df["Province_State"] == state)]
if df["ConfirmedCases"].sum() != 0:
for i in range(len(df)- maxlen):
if df[['ConfirmedCases']].iloc[i+maxlen].values != 0:
X.append(df[['ConfirmedCases_std']].iloc[i:(i+maxlen)].values)
Y.append(df[['ConfirmedCases_std']].iloc[i+maxlen].values)
X=np.array(X)
Y=np.array(Y)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_rate, shuffle = True ,random_state = 0 )<compute_test_metric> | for col in ['supplier_id', 'Current_pincode_ID', 'Employee_code_ID']:
train.loc[train[col].value_counts() [train[col]].values < 2, col] = -9999
test.loc[test[col].value_counts() [test[col]].values < 2, col] = -9999 | Car loan default |
8,861,736 | def huber_loss(y_true, y_pred, clip_delta=1.0):
error = y_true - y_pred
cond = tf.keras.backend.abs(error)< clip_delta
squared_loss = 0.5 * tf.keras.backend.square(error)
linear_loss = clip_delta *(tf.keras.backend.abs(error)- 0.5 * clip_delta)
return tf.where(cond, squared_loss, linear_loss)
def huber_loss_mean(y_true, y_pred, clip_delta=1.0):
return tf.keras.backend.mean(huber_loss(y_true, y_pred, clip_delta))
def rmsle(y, y_pred):
assert len(y)== len(y_pred)
terms_to_sum = [(math.log(y_pred[i] + 1)- math.log(y[i] + 1)) ** 2.0 for i,pred in enumerate(y_pred)]
return(sum(terms_to_sum)*(1.0/len(y)))** 0.5<choose_model_class> | train.drop("UniqueID", axis=1, inplace=True)
test.drop("UniqueID", axis=1, inplace=True ) | Car loan default |
8,861,736 | epochs_num = 20
n_hidden = 300
n_in = 1
model = Sequential()
model.add(GRU(n_hidden,
batch_input_shape=(None, maxlen, n_in),
kernel_initializer='random_uniform',
return_sequences=False))
model.add(Dense(n_in, kernel_initializer='random_uniform'))
model.add(Activation("linear"))
opt = Adagrad(lr=0.01, epsilon=1e-08, decay=1e-4)
model.compile(loss = huber_loss_mean, optimizer=opt )<train_model> | train = pd.get_dummies(train)
test = pd.get_dummies(test ) | Car loan default |
8,861,736 | early_stopping = EarlyStopping(monitor='loss', patience=5, verbose=1)
hist = model.fit(X_train, Y_train, batch_size=10, epochs=epochs_num,
callbacks=[early_stopping],shuffle=False )<predict_on_test> | def prod_det(x):
for i in range(x.shape[1]):
col_max = np.max(x[:,i])
x[:,i] = x[:,i] / col_max
return abs(np.linalg.det(np.dot(x.T, x)) ) | Car loan default |
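prod_det acts as a volume-style redundancy score: after max-scaling each column, abs(det(XᵀX)) stays large when columns are nearly orthogonal and collapses toward zero when they are collinear, which is why the greedy loop in the later cell keeps adding the column that maximizes it. A self-contained demonstration:

```python
import numpy as np

rng = np.random.default_rng(0)
base = rng.random((100, 1))
independent = np.hstack([base, rng.random((100, 1))])  # unrelated columns
collinear = np.hstack([base, 2 * base])                # perfectly dependent columns
for X in (independent, collinear):
    Xs = X / X.max(axis=0)
    print(abs(np.linalg.det(Xs.T @ Xs)))  # large value vs. ~0
```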
8,861,736 | predicted_std = model.predict(X_test)
result_std= pd.DataFrame(predicted_std)
result_std.columns = ['predict']
result_std['actual'] = Y_test<normalization> | train_cols = train.drop(['target', 'MobileNo_Avl_Flag'], axis=1 ).columns | Car loan default |
8,861,736 | predicted = scaler.inverse_transform(predicted_std)
Y_test = scaler.inverse_transform(Y_test )<create_dataframe> | var_sets = dict() | Car loan default |
8,861,736 | result= pd.DataFrame(predicted)
result.columns = ['predict']
result['actual'] = Y_test
result.plot(figsize=(25,6))
plt.show()<load_from_csv> | for num, col in enumerate(train_cols):
cols_list = [col]
i = 0
while i <= 20:
dict_det = dict()
for col_to_add in train_cols:
if col_to_add not in cols_list:
df_aux = np.array(train[cols_list])
dict_det[col_to_add] = prod_det(df_aux)
cols_list.append(sorted(dict_det.items() , key=lambda x: -x[1])[0][0])
i += 1
var_sets[num] = cols_list | Car loan default |
8,861,736 | test_df = pd.read_csv(".. /input/covid19-global-forecasting-week-2/test.csv")
test_df<load_from_csv> | var_set = dict()
for i in range(len(var_sets.items())) :
var_set[i] = set(var_sets[i])
| Car loan default |
8,861,736 | submission_c = pd.read_csv(".. /input/covid19-global-forecasting-week-2/submission.csv" )<filter> | list_of_sets = [i[1] for i in list(var_set.items())] | Car loan default |
8,861,736 | temp =(datetime.datetime.strptime("2020-03-18", '%Y-%m-%d')- datetime.timedelta(days=maxlen)).strftime('%Y-%m-%d')
test_df = train_df[train_df["Date"] > temp]<load_from_csv> | list_of_sets = np.unique(list_of_sets ) | Car loan default |
8,861,736 | check_df = pd.read_csv(".. /input/covid19-global-forecasting-week-2/train.csv" ).query("Date>'2020-03-18'and Date<='2020-03-31'")
check_df["ConfirmedCases_std"] = scaler.transform(check_df["ConfirmedCases"].values.reshape(len(check_df["ConfirmedCases"].values),1))<predict_on_test> | def test_var_sets(train):
cat_cols = [i for i in train.columns if i in categ_cols]
hot = OneHotEncoder(handle_unknown='ignore')
train_tr = pd.DataFrame(hot.fit_transform(train[cat_cols] ).toarray() , columns=hot.get_feature_names())
train = pd.concat([train.drop(cat_cols, axis=1), train_tr], axis=1)
del(train_tr)
y = train['target']
train = train.drop(['target', 'MobileNo_Avl_Flag'], axis=1)
X_train, X_valid, y_train, y_valid = train_test_split(train, y, test_size=0.3, random_state = 42)
lgbt = LGBMClassifier(max_depth=4, learning_rate=0.03, n_estimators=1000, random_state=42)
lgbt.fit(X_train, y_train)
y_pred = lgbt.predict_proba(X_valid)[:, 1]
return roc_auc_score(y_valid, y_pred ) | Car loan default |
8,861,736 | confirmedCases_pred = []
for i in range(0,294*maxlen,maxlen):
temp_array = np.array(test_df["ConfirmedCases_std"][i:i+maxlen])
for j in range(43):
if j<13:
temp_array = np.append(temp_array,np.array(check_df["ConfirmedCases_std"])[int(i*13/maxlen)+j])
elif np.array(test_df["ConfirmedCases"][i:i+maxlen] ).sum() == 0:
temp_array = np.append(temp_array,temp_array[-1])
else:
temp_array = np.append(temp_array,model.predict(temp_array[-maxlen:].reshape(1,maxlen,1)))
confirmedCases_pred.append(temp_array[-43:] )<normalization> | var_sets_auc = dict() | Car loan default |
8,861,736 | submission_c["ConfirmedCases"] = np.abs(scaler.inverse_transform(np.array(confirmedCases_pred ).reshape(294*43)))
submission_c["ConfirmedCases_std"] = np.array(confirmedCases_pred ).reshape(294*43)
submission_c<save_to_csv> | for varset in list_of_sets:
trainset = train[list(varset)+ ['target', 'MobileNo_Avl_Flag']]
auc = test_var_sets(trainset)
var_sets_auc[auc] = varset | Car loan default |
8,861,736 | submission_c.to_csv('./submission_c.csv')
submission_c.to_csv('..\\output\\kaggle\\working\\submission_c.csv')<normalization> | best_set = sorted(var_sets_auc.items() , key=lambda x: -x[0])[0][1] | Car loan default |
8,861,736 | test_rate = 0.05
maxlen = 20
train_date_count = len(set(train_df["Date"]))
X, Y = [],[]
scaler = StandardScaler()
train_df["Fatalities_std"] = scaler.fit_transform(train_df["Fatalities"].values.reshape(len(train_df["Fatalities"].values),1))
ss = StandardScaler()
train_df["ConfirmedCases_std"] = ss.fit_transform(train_df["ConfirmedCases"].values.reshape(len(train_df["ConfirmedCases"].values),1))
for state,country in train_df.groupby(["Province_State","Country_Region"] ).sum().index:
df = train_df[(train_df["Country_Region"] == country)&(train_df["Province_State"] == state)]
if df["Fatalities"].sum() != 0 or df["ConfirmedCases"].sum() != 0:
for i in range(len(df)- maxlen):
if(df[['ConfirmedCases']].iloc[i+maxlen].values != 0 or df[['Fatalities']].iloc[i+maxlen].values != 0):
X.append(df[['Fatalities_std','ConfirmedCases_std']].iloc[i:(i+maxlen)].values)
Y.append(df[['Fatalities_std']].iloc[i+maxlen].values)
X=np.array(X)
Y=np.array(Y)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_rate, shuffle = True ,random_state = 0 )<choose_model_class> | train_best = train[best_set]
test_best = test[best_set]
cat_cols = [i for i in train_best.columns if i in categ_cols] | Car loan default |
8,861,736 | epochs_num = 25
n_hidden = 300
n_in = 2
model = Sequential()
model.add(GRU(n_hidden,
batch_input_shape=(None, maxlen, n_in),
kernel_initializer='random_uniform',
return_sequences=False))
model.add(Dense(1, kernel_initializer='random_uniform'))
model.add(Activation("linear"))
opt = Adagrad(lr=0.01, epsilon=1e-08, decay=1e-4)
model.compile(loss = huber_loss_mean, optimizer=opt )<train_model> | train_best.nunique() | Car loan default |
8,861,736 | early_stopping = EarlyStopping(monitor='loss', patience=5, verbose=1)
hist = model.fit(X_train, Y_train, batch_size=8, epochs=epochs_num,
callbacks=[early_stopping],shuffle=False )<predict_on_test> | hot = OneHotEncoder(handle_unknown='ignore')
train_tr = pd.DataFrame(hot.fit_transform(train_best[cat_cols] ).toarray() , columns=hot.get_feature_names())
test_tr = pd.DataFrame(hot.transform(test_best[cat_cols] ).toarray() , columns=hot.get_feature_names())
train_best = pd.concat([train_best.drop(cat_cols, axis=1), train_tr], axis=1)
test_best = pd.concat([test_best.drop(cat_cols, axis=1), test_tr], axis=1)
del(train_tr, test_tr ) | Car loan default |
8,861,736 | predicted_std = model.predict(X_test)
result_std= pd.DataFrame(predicted_std)
result_std.columns = ['predict']
result_std['actual'] = Y_test<normalization> | y = train['target']
| Car loan default |
8,861,736 | predicted = scaler.inverse_transform(predicted_std)
Y_test = scaler.inverse_transform(Y_test )<normalization> | X_train, X_valid, y_train, y_valid = train_test_split(train_best, y, test_size=0.3, random_state = 42 ) | Car loan default |
8,861,736 | X_test_ = scaler.inverse_transform(X_test)
X_test_[9]<prepare_output> | lgbm_params = {'max_depth': [3,5,7],
'learning_rate':[0.05, 0.01, 0.03],
'n_estimators':[1000, 1200, 1400]}
lgbt = LGBMClassifier(random_state=42)
cv = KFold(n_splits=5, shuffle=True, random_state=42)
lgbm_grid = GridSearchCV(lgbt, lgbm_params, cv=cv, scoring='roc_auc', verbose=1, n_jobs=-1)
lgbm_grid.fit(X_train, y_train ) | Car loan default |
8,861,736 | submission_df = submission_c<filter> | lgbm_grid.best_params_ | Car loan default |
8,861,736 | temp =(datetime.datetime.strptime("2020-03-18", '%Y-%m-%d')- datetime.timedelta(days=maxlen)).strftime('%Y-%m-%d')
test_df = train_df[train_df["Date"] > temp]<normalization> | lgbt = LGBMClassifier(max_depth=3, learning_rate=0.05, n_estimators=1000, random_state=42)
lgbt.fit(X_train, y_train)
y_pred = lgbt.predict_proba(X_valid)[:, 1]
y_pred =(y_pred - np.min(y_pred)) /(np.max(y_pred)- np.min(y_pred))
roc_auc_score(y_valid, y_pred ) | Car loan default |
8,861,736 | check_df["Fatalities_std"] = scaler.transform(check_df["Fatalities"].values.reshape(len(check_df["Fatalities"].values),1))
check_df<concatenate> | y = train['target']
train = train.drop(['target', 'MobileNo_Avl_Flag'], axis=1)
test = test.drop(['MobileNo_Avl_Flag'], axis=1)
X_train, X_valid, y_train, y_valid = train_test_split(train, y, test_size=0.3, random_state = 42)
lgbm_params = {'max_depth': [4],
'learning_rate':np.arange(0.03, 0.06, 0.01),
'n_estimators':[500, 1000, 1500, 2000]}
lgbt = LGBMClassifier(random_state=42)
cv = KFold(n_splits=5, shuffle=True, random_state=42)
lgbm_grid = GridSearchCV(lgbt, lgbm_params, cv=cv, scoring='roc_auc')
lgbm_grid.fit(X_train, y_train ) | Car loan default |
8,861,736 | fatalities_pred = []
for i in range(0,294*maxlen,maxlen):
temp_array = np.array(test_df[["Fatalities_std","ConfirmedCases_std"]][i:i+maxlen])
for j in range(43):
if j<13:
temp_array = np.append(temp_array,np.append(np.array(check_df["Fatalities_std"])[int(i*13/maxlen)+j],np.array(check_df["ConfirmedCases_std"])[int(i*13/maxlen)+j] ).reshape(1,2),axis=0)
elif np.array(test_df[["Fatalities","ConfirmedCases"]][i:i+maxlen] ).sum() == 0:
temp_array = np.append(temp_array,np.array(temp_array[-1] ).reshape(1,2),axis=0)
else:
temp_array = np.append(temp_array,np.append(model.predict(temp_array[-maxlen:].reshape(1,maxlen,2)) ,submission_df["ConfirmedCases_std"][i//maxlen*43+j] ).reshape(1,2),axis=0)
fatalities_pred.append(temp_array[-43:] )<normalization> | lgbt = LGBMClassifier(max_depth=4, learning_rate=0.03, n_estimators=1000)
lgbt.fit(X_train, y_train)
y_pred = lgbt.predict_proba(X_valid)[:, 1]
roc_auc_score(y_valid, y_pred ) | Car loan default |
8,861,736 | submission_df["Fatalities"] = np.abs(scaler.inverse_transform([i[0] for i in np.array(fatalities_pred ).reshape(294*43,2)]))
submission_df<data_type_conversions> | lgbt = LGBMClassifier(max_depth=4, learning_rate=0.03, n_estimators=1000)
lgbt.fit(train, y ) | Car loan default |
8,861,736 | submission_df[["ConfirmedCases","Fatalities"]] = submission_df[["ConfirmedCases","Fatalities"]].round().astype(int)
submission_df<drop_column> | answer_1 = lgbt.predict_proba(test)[:, 1]
answer_1 | Car loan default |
8,861,736 | submission_df = submission_df.drop("ConfirmedCases_std",axis=1 )<drop_column> | answer1 = pd.DataFrame(columns=['ID', 'Predicted'])
answer1['ID'] = test.index
answer1['Predicted'] = answer_1 | Car loan default |
8,861,736 | <save_to_csv><EOS> | answer1.to_csv('answer228.csv', index=None ) | Car loan default |
8,807,169 | <SOS> metric: rmse Kaggle data source: similarity-search-project<import_modules> | inputFolderPath = '/kaggle/input/similarity-search-project/'
outputFolderPath = '/kaggle/working/'
inputFilePath_synthetic = inputFolderPath + 'synthetic_size50k_len256_znorm.bin'
inputFilePath_seismic = inputFolderPath + 'seismic_size50k_len256_znorm.bin'
queryFilePath_synthetic = inputFolderPath + 'synthetic-query_size100_len256_znorm.bin'
queryFilePath_seismic = inputFolderPath + 'seismic-query_size100_len256_znorm'
| Similarity Search Project |
8,807,169 | print("Read in libraries")
<load_from_csv> | def sum32(inputFilePath):
summary_filepath = outputFolderPath + filename + '_sum32'
time_series50k = np.fromfile(inputFilePath, dtype=np.float32 ).reshape(-1, 256)
summary50k = []
for time_series in time_series50k:
summary50k.append(time_series[0])
summary50knp = np.array(summary50k,dtype=np.float32)
summary50knp.tofile(summary_filepath)
return summary_filepath
def rec32(summary_filepath):
reconstructed_filepath = summary_filepath + '_rec32'
summary50k = np.fromfile(summary_filepath, dtype=np.float32)
reconstructed50k = []
for summary in summary50k:
reconstructed50k.append([summary]*256)
reconstructed50knp = np.array(reconstructed50k,dtype=np.float32)
reconstructed50knp.tofile(reconstructed_filepath)
return reconstructed_filepath
def sum64(inputFilePath):
summary_filepath = outputFolderPath + filename + '_sum64'
time_series50k = np.fromfile(inputFilePath, dtype=np.float32 ).reshape(-1, 256)
summary50k = []
for time_series in time_series50k:
summary50k.append(time_series[0])
summary50k.append(time_series[0])
summary50knp = np.array(summary50k,dtype=np.float32)
summary50knp.tofile(summary_filepath)
return summary_filepath
def rec64(summary_filepath):
reconstructed_filepath = summary_filepath + '_rec64'
summary50k = np.fromfile(summary_filepath, dtype=np.float32 ).reshape(-1, 2)
reconstructed50k = []
for summary in summary50k:
reconstructed50k.append([summary[0]]*256)
reconstructed50knp = np.array(reconstructed50k,dtype=np.float32)
reconstructed50knp.tofile(reconstructed_filepath)
return reconstructed_filepath
def sum128(inputFilePath):
summary_filepath = outputFolderPath + filename + '_sum128'
time_series50k = np.fromfile(inputFilePath, dtype=np.float32 ).reshape(-1, 256)
summary50k = []
for time_series in time_series50k:
summary50k.append(time_series[0])
summary50k.append(time_series[0])
summary50k.append(time_series[0])
summary50k.append(time_series[0])
summary50knp = np.array(summary50k,dtype=np.float32)
summary50knp.tofile(summary_filepath)
return summary_filepath
def rec128(summary_filepath):
reconstructed_filepath = summary_filepath + '_rec128'
summary50k = np.fromfile(summary_filepath, dtype=np.float32 ).reshape(-1, 4)
reconstructed50k = []
for summary in summary50k:
reconstructed50k.append([summary[0]]*256)
reconstructed50knp = np.array(reconstructed50k,dtype=np.float32)
reconstructed50knp.tofile(reconstructed_filepath)
return reconstructed_filepath | Similarity Search Project |
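The sum32/rec32 pair above keeps one float32 (32 bits) per length-256 series and reconstructs each series as that constant; sum64 and sum128 just repeat the same first value 2 and 4 times. A hedged round-trip sketch of the 32-bit scheme, using an assumed scratch path rather than the project files:

```python
import numpy as np

series = np.random.randn(10, 256).astype(np.float32)
series.tofile('/tmp/demo_znorm.bin')  # assumed temporary path
summary = np.fromfile('/tmp/demo_znorm.bin', dtype=np.float32).reshape(-1, 256)[:, 0]
reconstructed = np.repeat(summary[:, None], 256, axis=1)
print(reconstructed.shape)                                 # (10, 256)
print(bool((reconstructed[:, 0] == series[:, 0]).all()))   # True
```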
8,807,169 | print("read in train file")
df=pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/train.csv",
usecols=['Province_State','Country_Region','Date','ConfirmedCases','Fatalities'])
<drop_column> | def similarity() :
average_prune_ratio = 0
return average_prune_ratio | Similarity Search Project |
8,807,169 | <load_from_csv><EOS> | average_prune_ratio = similarity()
with open('submission.csv', 'w', newline='')as file:
writer = csv.writer(file)
writer.writerow(['id','expected'])
writer.writerow(['1',average_prune_ratio] ) | Similarity Search Project |
9,417,707 | train.loc[:, train.isna().any() ]<rename_columns> | train = pd.read_csv('/kaggle/input/1056lab-brain-cancer-classification/train.csv')
test = pd.read_csv('/kaggle/input/1056lab-brain-cancer-classification/test.csv')
train['type'] = train['type'].map({'normal':0, 'ependymoma':1, 'glioblastoma':2, 'medulloblastoma':3, 'pilocytic_astrocytoma':4} ) | Brain Cancer Classification |
9,417,707 | train.rename(columns={'Province_State':'Province'}, inplace=True)
train.rename(columns={'Country_Region':'Country'}, inplace=True)
train.rename(columns={'ConfirmedCases':'Confirmed'}, inplace=True )<rename_columns> | Y = train['type'].values
X = train.drop('type',axis=1 ).values | Brain Cancer Classification |
9,417,707 | test.rename(columns={'Province_State':'Province'}, inplace=True)
test.rename(columns={'Country_Region':'Country'}, inplace=True )<data_type_conversions> | model = lgb.LGBMClassifier() | Brain Cancer Classification |
9,417,707 | EMPTY_VAL = "EMPTY_VAL"
def fillState(state, country):
if state == EMPTY_VAL: return country
return state
train['Province'].fillna(EMPTY_VAL, inplace=True)
test['Province'].fillna(EMPTY_VAL, inplace=True )<categorify> | def met_f(y_test,y_pred):
return f1_score(y_test,y_pred,average='micro' ) | Brain Cancer Classification |
9,417,707 | bridge_types =('Date', 'Province', 'Country', 'Confirmed',
'Id')
countries = pd.DataFrame(train, columns=['Country'])
state = pd.DataFrame(train, columns=['Province'])
labelencoder = LabelEncoder()
train['Countries'] = labelencoder.fit_transform(train['Country'])
train['State']= labelencoder.fit_transform(train['Province'])
train
test['Countries'] = labelencoder.fit_transform(test['Country'])
test['State']= labelencoder.fit_transform(test['Province'])
train['Countries'].head()
<data_type_conversions> | selector = SelectFromModel(model, threshold="mean")
selector.fit(X, Y)
X_selected = selector.transform(X)
test_selected = selector.transform(test.values ) | Brain Cancer Classification |
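SelectFromModel(threshold="mean") keeps only the features whose fitted LightGBM importance is at or above the mean importance. A self-contained sketch on synthetic data (the shapes are illustrative, not the gene-expression matrix):

```python
import lightgbm as lgb
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectFromModel

X_demo, y_demo = make_classification(n_samples=200, n_features=30,
                                     n_informative=5, random_state=0)
sel = SelectFromModel(lgb.LGBMClassifier(random_state=0), threshold="mean")
sel.fit(X_demo, y_demo)
print(X_demo.shape[1], "->", sel.transform(X_demo).shape[1])
```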
9,417,707 | train['Date']= pd.to_datetime(train['Date'])
test['Date']= pd.to_datetime(test['Date'])
<drop_column> | model = lgb.LGBMClassifier()
stratifiedkfold = StratifiedKFold(n_splits=3)
score_func = {'auc': make_scorer(met_f)}
scores = cross_validate(model, X_selected, Y, cv = stratifiedkfold, scoring=score_func)
print('auc:', scores['test_auc'])
print('auc:', scores['test_auc'].mean() ) | Brain Cancer Classification |
9,417,707 | train = train.set_index(['Date'])
test = test.set_index(['Date'])
train<feature_engineering> | model = lgb.LGBMClassifier()
model.fit(X_selected,Y)
p = model.predict(test_selected)
sample = pd.read_csv('/kaggle/input/1056lab-brain-cancer-classification/sampleSubmission.csv',index_col = 0)
sample['type'] = p
sample.to_csv('predict_lgbm_sfm.csv',header = True ) | Brain Cancer Classification |
9,417,707 | def create_time_features(df):
df['date'] = df.index
df['hour'] = df['date'].dt.hour
df['dayofweek'] = df['date'].dt.dayofweek
df['quarter'] = df['date'].dt.quarter
df['month'] = df['date'].dt.month
df['year'] = df['date'].dt.year
df['dayofyear'] = df['date'].dt.dayofyear
df['dayofmonth'] = df['date'].dt.day
df['weekofyear'] = df['date'].dt.weekofyear
X = df[['hour','dayofweek','quarter','month','year',
'dayofyear','dayofmonth','weekofyear']]
return X<drop_column> | selector = SelectPercentile(score_func=f_regression, percentile=100)
selector.fit(X, Y)
X_selected = selector.transform(X)
test_selected = selector.transform(test.values ) | Brain Cancer Classification |