kernel_id: int64 (values 24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
9,417,707
train.drop("date", axis=1, inplace=True) test.drop("date", axis=1, inplace=True )<count_missing_values>
model = lgb.LGBMClassifier() stratifiedkfold = StratifiedKFold(n_splits=3) score_func = {'auc': make_scorer(met_f)} scores = cross_validate(model, X_selected, Y, cv = stratifiedkfold, scoring=score_func) print('auc:', scores['test_auc']) print('auc:', scores['test_auc'].mean() )
Brain Cancer Classification
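A minimal, self-contained sketch of the StratifiedKFold + cross_validate pattern in the completion above, assuming synthetic data and sklearn's roc_auc_score standing in for the kernel's own met_f (the needs_proba flag matches the older sklearn these kernels target):

import lightgbm as lgb
from sklearn.datasets import make_classification
from sklearn.metrics import make_scorer, roc_auc_score
from sklearn.model_selection import StratifiedKFold, cross_validate

X, y = make_classification(n_samples=300, n_features=20, random_state=0)
model = lgb.LGBMClassifier()
cv = StratifiedKFold(n_splits=3)
score_func = {'auc': make_scorer(roc_auc_score, needs_proba=True)}
scores = cross_validate(model, X, y, cv=cv, scoring=score_func)
print('fold AUCs:', scores['test_auc'])
print('mean AUC:', scores['test_auc'].mean())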
9,417,707
<count_missing_values><EOS>
model = lgb.LGBMClassifier() model.fit(X_selected,Y) p = model.predict(test_selected) sample = pd.read_csv('/kaggle/input/1056lab-brain-cancer-classification/sampleSubmission.csv',index_col = 0) sample['type'] = p sample.to_csv('predict_lgbm_100.csv',header = True )
Brain Cancer Classification
71,721
<train_on_grid><EOS>
class TextTransformer(BaseEstimator, TransformerMixin): def __init__(self, est): super(TextTransformer, self ).__init__() self.est = est pass def fit(self, X, y=None): self.est.fit(X.ravel()) return self def transform(self, X): Xs = [ self.est.transform(X[:,_]) for _ in range(X.shape[-1]) ] result =(Xs[0]>0 ).astype(int) for _ in range(len(Xs)-1): result +=(Xs[_+1]>0 ).astype(int) return sp.hstack(( (result == len(Xs)).astype(float), (result == 1 ).astype(float) )).tocsr() class SupervisedTransformer(BaseEstimator, TransformerMixin): def __init__(self, est, method): super(SupervisedTransformer, self ).__init__() self.est = est self.method = method pass def fit(self, X, y=None): self.est.fit(X, y) return self def transform(self, X): return getattr(self.est, self.method )(X) class EqNotEqBinarizer(BaseEstimator, TransformerMixin): def __init__(self): super(EqNotEqBinarizer, self ).__init__() pass def fit(self, X, y=None): assert X.shape[-1] == 2, 'Only two-column arrays' self.bin_ = LabelBinarizer(sparse_output=True) self.bin_.fit(X.ravel()) return self def transform(self, X): z = np.zeros(( X.shape[0], 3), dtype=int) eqmask = X[:,0] == X[:,1] noteqmask = X[:,0] != X[:,1] z[eqmask,0] = X[eqmask,0] z[noteqmask,1] = X[noteqmask,0] z[noteqmask,2] = X[noteqmask,1] return sp.hstack(( self.bin_.transform(z[:,0]), self.bin_.transform(z[:,1])+ self.bin_.transform(z[:,2]) )) def get_file(mode): params = {} if mode == 'test': params['index_col'] = 0 items = pd.read_csv('../input/ItemInfo_%s.csv' % mode, index_col=0)[[ 'categoryID', 'title', 'locationID', 'metroID', 'lon', 'lat']] items['title'] = items['title'].fillna('nan') items['metroID'] = items['metroID'].fillna(-1) parent_categories = pd.read_csv('../input/Category.csv', index_col=0) regions = pd.read_csv('../input/Location.csv', index_col=0) items = pd.merge(items, parent_categories, left_on='categoryID', right_index=True, how='inner', sort=False) items = pd.merge(items, regions, left_on='locationID', right_index=True, how='inner', sort=False) del parent_categories del regions pr = pd.read_csv('../input/ItemPairs_%s.csv' % mode, **params) pr = pd.merge(pr, items, left_on='itemID_1', right_index=True, how='inner', sort=False) pr = pd.merge(pr, items, left_on='itemID_2', right_index=True, how='inner', sort=False) del items print('Columns: ' + str(pr.columns), file=sys.stderr) fields = [ 'categoryID_x', 'parentCategoryID_x', 'title_x', 'title_y', 'locationID_x', 'locationID_y', 'regionID_x', 'regionID_y', 'metroID_x', 'metroID_y', 'lon_x', 'lon_y', 'lat_x', 'lat_y', ] if mode == 'train': return pr[fields + ['isDuplicate']] else: return pr[fields] def get_balanced_train_indices(column="categoryID"): prtest = pd.read_csv("../input/ItemPairs_test.csv", index_col=0) prtest = pd.merge(prtest, pd.read_csv("../input/ItemInfo_test.csv", index_col=0), left_on="itemID_1", right_index=True, how="inner", sort=False) catdist = prtest[column].value_counts() / len(prtest) del prtest prtrain = pd.read_csv("../input/ItemPairs_train.csv") prtrain = pd.merge(prtrain, pd.read_csv("../input/ItemInfo_train.csv", index_col=0), left_on="itemID_1", right_index=True, how="inner", sort=False) indices = np.array([]) trainsize = len(prtrain) for cat, dist in catdist.iteritems() : trcatdist = len(prtrain[prtrain[column] == cat]) if trcatdist < int(1.0 * dist * trainsize): trainsize = int(1.0 * trainsize * trcatdist /(dist * trainsize)) for cat, dist in catdist.iteritems() : indices = np.hstack(( indices, shuffle(prtrain[prtrain[column] == cat].index, random_state=1)[:int(dist*trainsize)])) indices = pd.Index(np.sort(indices.astype(int))) return indices def _print_shape(X): print("SHAPE: ", X.shape, file=sys.stderr) return X est = Pipeline([ ('shape1', FunctionTransformer(_print_shape, validate=False)) , ('feats', FeatureUnion(transformer_list=[ ('categories', Pipeline([ ('filter', FunctionTransformer(lambda X: X[:,[0]], validate=False)) , ('binarizer', OneHotEncoder()), ('shape1', FunctionTransformer(_print_shape, validate=False)) , ])) , ('parentCategories', Pipeline([ ('filter', FunctionTransformer(lambda X: X[:,[1]], validate=False)) , ('binarizer', OneHotEncoder()), ('shape1', FunctionTransformer(_print_shape, validate=False)) , ])) , ('titles', Pipeline([ ('filter', FunctionTransformer(lambda X: X[:,[2,3]], validate=False)) , ('titleswitch', TextTransformer(CountVectorizer(binary=True))), ('logreg', SupervisedTransformer(LogisticRegression(C=0.01), 'predict_proba')) , ('selector', FunctionTransformer(lambda X: X[:,[1]])) , ('shape1', FunctionTransformer(_print_shape, validate=False)) , ])) , ('regionID', Pipeline([ ('filter', FunctionTransformer(lambda X: X[:,[6,7]].astype(int), validate=False)) , ('binarizer', EqNotEqBinarizer()), ('threshold', VarianceThreshold(0.0001)) , ('shape1', FunctionTransformer(_print_shape, validate=False)) , ])) , ('coords', Pipeline([ ('filter', FunctionTransformer(lambda X: X[:,[10,11,12,13]].astype(float), validate=False)) , ('shape1', FunctionTransformer(_print_shape, validate=False)) , ])) , ])) , ('shape2', FunctionTransformer(_print_shape, validate=False)) , ('est', XGBClassifier()), ]) pr = get_file('train') print('Columns: ' + str(pr.columns), file=sys.stderr) print('FITTING...', file=sys.stderr) est.fit(pr.drop('isDuplicate', axis=1 ).values, pr['isDuplicate'].values) print('FITTED', file=sys.stderr) del pr pr = get_file('test') print('Columns: ' + str(pr.columns), file=sys.stderr) pr['probability'] = est.predict_proba(pr.values)[:,1] pr[['probability']].to_csv('submission.csv')
Avito Duplicate Ads Detection
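The Avito pipeline above leans on a FunctionTransformer-as-column-picker idiom inside a FeatureUnion; a minimal sketch of that idiom on made-up data:

import numpy as np
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import FunctionTransformer, OneHotEncoder

X = np.array([[0, 1.5], [1, -2.0], [0, 0.3]])
feats = FeatureUnion([
    # one-hot the categorical column 0, pass the numeric column 1 through unchanged
    ('cat', Pipeline([('pick', FunctionTransformer(lambda X: X[:, [0]], validate=False)),
                      ('ohe', OneHotEncoder())])),
    ('num', FunctionTransformer(lambda X: X[:, [1]], validate=False)),
])
print(feats.fit_transform(X).shape)  # (3, 3): two one-hot columns + one raw column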
93,236
<SOS> metric: MulticlassLoss Kaggle data source: talkingdata-mobile-user-demographics<prepare_output>
import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import os from sklearn.preprocessing import LabelEncoder from scipy.sparse import csr_matrix, hstack import xgboost as xgb from sklearn.cross_validation import StratifiedKFold from sklearn.metrics import log_loss
TalkingData Mobile User Demographics
93,236
for i in range(1,len(result)) : prediction = prediction.append(state_i_data[i].iloc[-21:]) prediction = prediction.append(result[i] )<define_variables>
def bintod(x): if x < 3: return 0 elif x < 8: return 1 elif x < 20: return 2 elif x < 23: return 3 else: return 0 def lngregion(x): if x < 80: return 1 elif x < 90: return 2 elif x < 100: return 3 elif x < 110: return 4 elif x < 120: return 5 elif x < 130: return 6 elif x < 140: return 7 else: return 0 nlng = 8 nlat = 9 def latregion(x): if x < 20: return 1 elif x < 25: return 2 elif x < 30: return 3 elif x < 35: return 4 elif x < 40: return 5 elif x < 45: return 6 elif x < 50: return 7 elif x < 55: return 8 else: return 0
TalkingData Mobile User Demographics
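The chained if/elif ladders above are hand-rolled binning; assuming the pd.cut edges below mirror lngregion exactly, the same mapping can be written more compactly:

import numpy as np
import pandas as pd

lng = pd.Series([75.0, 95.0, 118.0, 150.0])
bins = [-np.inf, 80, 90, 100, 110, 120, 130, 140, np.inf]
labels = [1, 2, 3, 4, 5, 6, 7, 0]  # lngregion returns 0 for values >= 140
print(pd.cut(lng, bins=bins, labels=labels, right=False).astype(int).tolist())  # [1, 3, 5, 0]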
93,236
prediction.index = range(0,len(prediction)) <load_from_csv>
datadir = '../input' gatrain = pd.read_csv(os.path.join(datadir,'gender_age_train.csv'), index_col='device_id') gatest = pd.read_csv(os.path.join(datadir,'gender_age_test.csv'), index_col = 'device_id') phone = pd.read_csv(os.path.join(datadir,'phone_brand_device_model.csv')) phone = phone.drop_duplicates('device_id',keep='first' ).set_index('device_id') events = pd.read_csv(os.path.join(datadir,'events.csv'), parse_dates=['timestamp'], index_col='event_id') appevents = pd.read_csv(os.path.join(datadir,'app_events.csv'), usecols=['event_id','app_id','is_active'], dtype={'is_active':bool}) applabels = pd.read_csv(os.path.join(datadir,'app_labels.csv')) labelcat = pd.read_csv(os.path.join(datadir,'label_categories.csv')) labelcat['category']=labelcat['category'].fillna('label-missing') labelcat.head()
TalkingData Mobile User Demographics
93,236
sub_format = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/submission.csv' )<drop_column>
gatrain['trainrow'] = np.arange(gatrain.shape[0]) gatest['testrow'] = np.arange(gatest.shape[0] )
TalkingData Mobile User Demographics
93,236
sub_format = sub_format['ForecastId']<concatenate>
events['longitude'] = events['longitude'].round(0) events['latitude'] = events['latitude'].round(0) events['longitude'] = events['longitude'].clip_lower(73.0 ).replace(73.0, np.NaN) events['longitude'] = events['longitude'].clip_upper(135.0 ).replace(135.0, np.NaN) events['latitude'] = events['latitude'].clip_lower(15.0 ).replace(15.0, np.NaN) events['latitude'] = events['latitude'].clip_upper(60.0 ).replace(60.0, np.NaN) events['latitude2'] =events.groupby(['device_id'])['latitude'].transform(lambda x: x.mode()) events['longitude2'] =events.groupby(['device_id'])['longitude'].transform(lambda x: x.mode() )
TalkingData Mobile User Demographics
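The clip_lower/clip_upper + replace chain above turns coordinates outside a rough China bounding box into NaN; since clip_lower and clip_upper were removed from later pandas, an equivalent sketch with where():

import pandas as pd

lng = pd.Series([0.0, 73.5, 116.4, 140.0])
# keep only strictly in-range longitudes, NaN out the rest (same effect as the chain above)
lng = lng.where((lng > 73.0) & (lng < 135.0))
print(lng.tolist())  # [nan, 73.5, 116.4, nan]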
93,236
final = pd.concat([sub_format,prediction],axis=1 )<save_to_csv>
events_latlng = events[['device_id', 'latitude2','longitude2']].drop_duplicates('device_id', keep='first') events_latlng = events_latlng.set_index('device_id') print('Number of devices with some lat long info',len(events_latlng['latitude2'])) print('out of that missing longitude: ', sum(events_latlng['longitude2'].isnull())) print('out of that missing latitude: ', sum(events_latlng['latitude2'].isnull())) events_latlng['lng_region'] = events_latlng['longitude2'].apply(lngregion) events_latlng['lat_region'] = events_latlng['latitude2'].apply(latregion) print("Frequencies longitude region:" ' ', events_latlng['lng_region'].value_counts()) print("Frequencies latitude region:" ' ', events_latlng['lat_region'].value_counts() )
TalkingData Mobile User Demographics
93,236
final.to_csv('submission.csv',index=False )<import_modules>
gatrain['lng_region'] = events_latlng['lng_region'] gatest['lng_region'] = events_latlng['lng_region'] gatrain['lng_region'] = gatrain.lng_region.fillna(value=0.0) gatest['lng_region'] = gatest.lng_region.fillna(value=0.0) Xtr_lng = csr_matrix(( np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.lng_region)) , shape=(gatrain.shape[0],nlng)) Xte_lng = csr_matrix(( np.ones(gatest.shape[0]), (gatest.testrow, gatest.lng_region)) , shape=(gatest.shape[0],nlng)) print('Longitude features: train shape {}, test shape {}'.format(Xtr_lng.shape, Xte_lng.shape))
TalkingData Mobile User Demographics
93,236
import plotly.graph_objects as go import matplotlib.pyplot as plt from tqdm import tqdm import time from datetime import datetime from pathlib import Path from sklearn import preprocessing import keras.backend as K from keras.models import Sequential from keras.layers import Dense, LSTM, RNN, Dropout from keras.callbacks import EarlyStopping from keras import optimizers from sklearn.preprocessing import StandardScaler, MinMaxScaler<load_from_csv>
gatrain['lat_region'] = events_latlng['lat_region'] gatest['lat_region'] = events_latlng['lat_region'] gatrain['lat_region'] = gatrain.lat_region.fillna(value=0.0) gatest['lat_region'] = gatest.lat_region.fillna(value=0.0) Xtr_lat = csr_matrix(( np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.lat_region)) , shape=(gatrain.shape[0],nlat)) Xte_lat = csr_matrix(( np.ones(gatest.shape[0]), (gatest.testrow, gatest.lat_region)) , shape=(gatest.shape[0],nlat)) print('Latitude features: train shape {}, test shape {}'.format(Xtr_lat.shape, Xte_lat.shape))
TalkingData Mobile User Demographics
93,236
train = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/train.csv") test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/test.csv") train.tail()<define_variables>
events['timeadj'] =(events['longitude2']- 116.41)*4 events['timeadj'] = events['timeadj'].fillna(0.0 ).astype(int) events['timestamp2'] = events['timestamp'] + events['timeadj'].values.astype('timedelta64[m]') events['todh'] = events['timestamp2'].map(lambda x : x.hour) events['tod'] = events['todh'].apply(bintod) ntod = 4 eventod =(events.groupby(['device_id','tod'])['tod'].agg(['size']) .merge(gatrain[['trainrow']], how='left',left_index=True, right_index=True) .merge(gatest[['testrow']], how='left',left_index=True, right_index=True) .reset_index()) eventod.head()
TalkingData Mobile User Demographics
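One step in the completion above deserves a note: the magic numbers in (longitude2 - 116.41) * 4 are, presumably, Beijing's longitude (about 116.4°E) and the Earth's rotation rate of 4 clock minutes per degree, so the adjustment shifts timestamps toward approximate local solar time:

# 360 degrees per 24 h  ->  4 minutes of clock time per degree of longitude
print(24 * 60 / 360)          # 4.0
print((121.47 - 116.41) * 4)  # Shanghai (~121.5 E) runs ~20 min ahead of Beijing time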
93,236
mask = train['Date'].max() world_cum_confirmed = sum(train[train['Date'] == mask].ConfirmedCases) world_cum_fatal = sum(train[train['Date'] == mask].Fatalities )<train_model>
eventod['size'] = eventod['size'].map(lambda x: np.log(x+1)) t = eventod.dropna(subset=['trainrow']) Xtr_tod = csr_matrix(( t['size'].values,(t.trainrow, t.tod)) , shape=(gatrain.shape[0],ntod)) t = eventod.dropna(subset=['testrow']) Xte_tod = csr_matrix(( t['size'].values,(t.testrow, t.tod)) , shape=(gatest.shape[0],ntod)) print('TOD data: train shape {}, test shape {}'.format(Xtr_tod.shape, Xte_tod.shape))
TalkingData Mobile User Demographics
93,236
print('Number of Countires are: ', len(train['Country_Region'].unique())) print('Training dataset ends at: ', mask) print('Number of cumulative confirmed cases worldwide are: ', world_cum_confirmed) print('Number of cumulative fatal cases worldwide are: ', world_cum_fatal )<sort_values>
events["dow"] = events["timestamp"].map(lambda x : x.dayofweek) events['wkend'] = 0 events.ix[events.dow > 4,'wkend'] = 1 ndow = 2 evendow =(events.groupby(['device_id','wkend'])['wkend'].agg(['size']) .merge(gatrain[['trainrow']], how='left',left_index=True, right_index=True) .merge(gatest[['testrow']], how='left',left_index=True, right_index=True) .reset_index()) evendow.head()
TalkingData Mobile User Demographics
93,236
cum_per_country = train[train['Date'] == mask].groupby(['Date','Country_Region'] ).sum().sort_values(['ConfirmedCases'], ascending=False) cum_per_country[:10]<groupby>
brandencoder = LabelEncoder().fit(phone.phone_brand) phone['brand'] = brandencoder.transform(phone['phone_brand']) gatrain['brand'] = phone['brand'] gatest['brand'] = phone['brand'] Xtr_brand = csr_matrix(( np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.brand))) Xte_brand = csr_matrix(( np.ones(gatest.shape[0]), (gatest.testrow, gatest.brand))) print('Brand features: train shape {}, test shape {}'.format(Xtr_brand.shape, Xte_brand.shape))
TalkingData Mobile User Demographics
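The csr_matrix((data, (row, col))) constructor above is the workhorse of this kernel family: it builds a sparse one-hot matrix directly from encoded ids. A tiny sketch with made-up brands:

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.preprocessing import LabelEncoder

brands = np.array(['htc', 'xiaomi', 'htc', 'samsung'])
codes = LabelEncoder().fit_transform(brands)          # [0, 2, 0, 1]
rows = np.arange(len(codes))                          # one row per device
X = csr_matrix((np.ones(len(codes)), (rows, codes)))  # shape (4, 3), one 1 per row
print(X.toarray())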
93,236
date = train['Date'].unique() cc_us = train[train['Country_Region'] == 'US'].groupby(['Date'] ).sum().ConfirmedCases ft_us = train[train['Country_Region'] == 'US'].groupby(['Date'] ).sum().Fatalities cc_ity = train[train['Country_Region'] == 'Italy'].groupby(['Date'] ).sum().ConfirmedCases ft_ity = train[train['Country_Region'] == 'Italy'].groupby(['Date'] ).sum().Fatalities cc_spn = train[train['Country_Region'] == 'Spain'].groupby(['Date'] ).sum().ConfirmedCases ft_spn = train[train['Country_Region'] == 'Spain'].groupby(['Date'] ).sum().Fatalities cc_gmn = train[train['Country_Region'] == 'Germany'].groupby(['Date'] ).sum().ConfirmedCases ft_gmn = train[train['Country_Region'] == 'Germany'].groupby(['Date'] ).sum().Fatalities cc_frc = train[train['Country_Region'] == 'France'].groupby(['Date'] ).sum().ConfirmedCases ft_frc = train[train['Country_Region'] == 'France'].groupby(['Date'] ).sum().Fatalities fig = go.Figure() fig.add_trace(go.Scatter(x=date, y=cc_us, name='US')) fig.add_trace(go.Scatter(x=date, y=cc_ity, name='Italy')) fig.add_trace(go.Scatter(x=date, y=cc_spn, name='Spain')) fig.add_trace(go.Scatter(x=date, y=cc_gmn, name='Germany')) fig.add_trace(go.Scatter(x=date, y=cc_frc, name='France')) fig.update_layout(title="Plot of Cumulative Cases for Top 5 countires(except China)", xaxis_title="Date", yaxis_title="Cases") fig.update_xaxes(nticks=30) fig.show()<data_type_conversions>
brandencoder = LabelEncoder().fit(phone.phone_brand) phone['brand'] = brandencoder.transform(phone['phone_brand']) gatrain['brand'] = phone['brand'] gatest['brand'] = phone['brand'] Xtr_brand = csr_matrix(( np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.brand))) Xte_brand = csr_matrix(( np.ones(gatest.shape[0]), (gatest.testrow, gatest.brand))) print('Brand features: train shape {}, test shape {}'.format(Xtr_brand.shape, Xte_brand.shape))
TalkingData Mobile User Demographics
93,236
train['Date'] = pd.to_datetime(train['Date']) test['Date'] = pd.to_datetime(test['Date']) train['Country_Region'] = train['Country_Region'].astype(str) test['Country_Region'] = test['Country_Region'].astype(str) <feature_engineering>
m = phone.phone_brand.str.cat(phone.device_model.str.split().str.get(0)) modelencoder = LabelEncoder().fit(m) phone['model'] = modelencoder.transform(m) gatrain['model'] = phone['model'] gatest['model'] = phone['model'] Xtr_model = csr_matrix(( np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.model))) Xte_model = csr_matrix(( np.ones(gatest.shape[0]), (gatest.testrow, gatest.model))) print('Model features: train shape {}, test shape {}'.format(Xtr_model.shape, Xte_model.shape))
TalkingData Mobile User Demographics
93,236
EMPTY_VAL = "EMPTY_VAL" def fillState(state, country): if state == EMPTY_VAL: return country return state train['Province_State'].fillna(EMPTY_VAL, inplace=True) train['Province_State'] = train.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fillState(x['Province_State'], x['Country_Region']), axis=1) test['Province_State'].fillna(EMPTY_VAL, inplace=True) test['Province_State'] = test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fillState(x['Province_State'], x['Country_Region']), axis=1 )<categorify>
appencoder = LabelEncoder().fit(appevents.app_id) appevents['app'] = appencoder.transform(appevents.app_id) napps = len(appencoder.classes_) deviceapps =(appevents.merge(events[['device_id']], how='left',left_on='event_id',right_index=True) .groupby(['device_id','app'])['app'].agg(['size']) .merge(gatrain[['trainrow']], how='left', left_index=True, right_index=True) .merge(gatest[['testrow']], how='left', left_index=True, right_index=True) .reset_index() )
TalkingData Mobile User Demographics
93,236
le = preprocessing.LabelEncoder() train['country_encoder'] = le.fit_transform(train['Country_Region']) train['date_int'] = train['Date'].apply(lambda x: datetime.strftime(x, '%m%d')).astype(int) test['country_encoder'] = le.transform(test['Country_Region']) test['date_int'] = test['Date'].apply(lambda x: datetime.strftime(x, '%m%d')).astype(int )<categorify>
d = deviceapps.dropna(subset=['trainrow']) Xtr_app = csr_matrix(( np.ones(d.shape[0]),(d.trainrow, d.app)) , shape=(gatrain.shape[0],napps)) d = deviceapps.dropna(subset=['testrow']) Xte_app = csr_matrix(( np.ones(d.shape[0]),(d.testrow, d.app)) , shape=(gatest.shape[0],napps)) print('Apps data: train shape {}, test shape {}'.format(Xtr_app.shape, Xte_app.shape))
TalkingData Mobile User Demographics
93,236
le = preprocessing.LabelEncoder() train['province_encoder'] = le.fit_transform(train['Province_State']) test['province_encoder'] = le.transform(test['Province_State'] )<count_duplicates>
applabels = applabels.loc[applabels.app_id.isin(appevents.app_id.unique())] applabels['app'] = appencoder.transform(applabels.app_id) labelcat = labelcat.loc[labelcat.label_id.isin(applabels.label_id.unique())] labelencoder = LabelEncoder().fit(labelcat.category) labelcat['label'] = labelencoder.transform(labelcat.category) nlabels = len(labelencoder.classes_) print('number of unique labels:',nlabels) print('recoded label categories', '\n',labelcat.head(n=20)) applabels=applabels.merge(labelcat[['label','label_id']], how='left',left_on='label_id',right_on='label_id') devicelabels =(deviceapps[['device_id','app']] .merge(applabels[['app','label']]) .groupby(['device_id','label'])['app'].agg(['size']) .merge(gatrain[['trainrow']], how='left', left_index=True, right_index=True) .merge(gatest[['testrow']], how='left', left_index=True, right_index=True) .reset_index()) devicelabels.head()
TalkingData Mobile User Demographics
93,236
<load_from_csv>
d = devicelabels.dropna(subset=['trainrow']) Xtr_label = csr_matrix(( np.ones(d.shape[0]),(d.trainrow, d.label)) , shape=(gatrain.shape[0],nlabels)) d = devicelabels.dropna(subset=['testrow']) Xte_label = csr_matrix(( np.ones(d.shape[0]),(d.testrow, d.label)) , shape=(gatest.shape[0],nlabels)) print('Labels data: train shape {}, test shape {}'.format(Xtr_label.shape, Xte_label.shape))
TalkingData Mobile User Demographics
93,236
train_df = pd.read_csv(Path('/kaggle/input/covid19w2', 'train_df.csv'), index_col = 0, parse_dates = ['date']) train_df = train_df[train_df['date_int']>=301] train_df['weekday'] = train_df['date'].dt.weekday train_df[train_df['country'] == 'Italy']<split>
Xtrain = hstack(( Xtr_brand, Xtr_model, Xtr_app, Xtr_label, Xtr_tod, Xtr_dow, Xtr_lat, Xtr_lng), format='csr') Xtest = hstack(( Xte_brand, Xte_model, Xte_app, Xte_label, Xte_tod, Xte_dow, Xte_lat, Xte_lng), format='csr') print('All features: train shape {}, test shape {}'.format(Xtrain.shape, Xtest.shape))
TalkingData Mobile User Demographics
93,236
def split_train_val(df, val_ratio): val_len = int(len(df)* val_ratio) train_set = df[:-val_len] val_set = df[-val_len:] return train_set, val_set<prepare_x_and_y>
targetencoder = LabelEncoder().fit(gatrain.group) y = targetencoder.transform(gatrain.group )
TalkingData Mobile User Demographics
93,236
test_fixed_cols = ['ForecastId', 'Province_State', 'Country_Region', 'Date'] fixed_cols = ['Id', 'province', 'country', 'date'] output_cols = ['cc_cases', 'ft_cases'] input_cols = list(set(train_df.columns.to_list())- set(fixed_cols)- set(output_cols)) print('output columns are ', output_cols) print('input columns are ', input_cols) X = train_df[input_cols] y = train_df[output_cols]<categorify>
params = {} params['booster'] = 'gblinear' params['objective'] = "multi:softprob" params['eval_metric'] = 'mlogloss' params['eta'] = 0.005 params['num_class'] = 12 params['lambda'] = 3 params['alpha'] = 2
TalkingData Mobile User Demographics
93,236
cc_input = ['country_encoder', 'province_encoder', 'weekday', 'date_int','cc_cases_1', 'cc_cases_2', 'cc_cases_3', 'cc_cases_4', 'cc_cases_5', 'cc_cases_6', 'cc_cases_7'] ft_input = ['country_encoder', 'province_encoder', 'weekday' , 'date_int', 'ft_cases_1', 'ft_cases_2', 'ft_cases_3', 'ft_cases_4', 'ft_cases_5', 'ft_cases_6', 'ft_cases_7'] cc_output = ['cc_cases'] ft_output = ['ft_cases'] val_ratio = 0.05 X_cc = X[cc_input] X_ft = X[ft_input] y_cc = y[cc_output] y_ft = y[ft_output] train_X_cc, val_X_cc = split_train_val(df = X_cc, val_ratio = val_ratio) train_y_cc, val_y_cc = split_train_val(df = y_cc, val_ratio = val_ratio) train_X_ft, val_X_ft = split_train_val(df = X_ft, val_ratio = val_ratio) train_y_ft, val_y_ft = split_train_val(df = y_ft, val_ratio = val_ratio )<normalization>
kf = list(StratifiedKFold(y, n_folds=10, shuffle=True, random_state=4242)) [0] Xtr, Xte = Xtrain[kf[0], :], Xtrain[kf[1], :] ytr, yte = y[kf[0]], y[kf[1]] print('Training set: ' + str(Xtr.shape)) print('Validation set: ' + str(Xte.shape)) d_train = xgb.DMatrix(Xtr, label=ytr) d_valid = xgb.DMatrix(Xte, label=yte) watchlist = [(d_train, 'train'),(d_valid, 'eval')]
TalkingData Mobile User Demographics
93,236
<normalization><EOS>
clf = xgb.train(params, d_train, 1000, watchlist, early_stopping_rounds=25) pred = clf.predict(xgb.DMatrix(Xtest)) pred = pd.DataFrame(pred, index = gatest.index, columns=targetencoder.classes_) pred.head() pred.to_csv('sparse_xgb_v11.csv', index=True )
TalkingData Mobile User Demographics
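A compact, self-contained sketch of the xgb.train + watchlist + early-stopping pattern used above, with binary toy data standing in for the 12-class problem:

import numpy as np
import xgboost as xgb

rng = np.random.default_rng(0)
X, y = rng.normal(size=(200, 5)), rng.integers(0, 2, size=200)
d_train = xgb.DMatrix(X[:150], label=y[:150])
d_valid = xgb.DMatrix(X[150:], label=y[150:])
params = {'objective': 'binary:logistic', 'eval_metric': 'logloss', 'eta': 0.1}
bst = xgb.train(params, d_train, num_boost_round=100,
                evals=[(d_train, 'train'), (d_valid, 'eval')],
                early_stopping_rounds=10)
print(bst.best_iteration)  # round where validation logloss stopped improving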
87,441
<data_type_conversions><EOS>
%matplotlib inline
TalkingData Mobile User Demographics
87,441
<SOS> metric: MulticlassLoss Kaggle data source: talkingdata-mobile-user-demographics<compute_test_metric>
%matplotlib inline
TalkingData Mobile User Demographics
87,441
def root_mean_squared_log_error(y_true, y_pred): return K.sqrt(K.mean(K.square(K.log(y_pred + 1)- K.log(y_true + 1))))<choose_model_class>
datadir = '../input' gatrain = pd.read_csv(os.path.join(datadir,'gender_age_train.csv'), index_col='device_id') gatest = pd.read_csv(os.path.join(datadir,'gender_age_test.csv'), index_col = 'device_id') phone = pd.read_csv(os.path.join(datadir,'phone_brand_device_model.csv')) phone = phone.drop_duplicates('device_id',keep='first' ).set_index('device_id') events = pd.read_csv(os.path.join(datadir,'events.csv'), parse_dates=['timestamp'], index_col='event_id') appevents = pd.read_csv(os.path.join(datadir,'app_events.csv'), usecols=['event_id','app_id','is_active'], dtype={'is_active':bool}) applabels = pd.read_csv(os.path.join(datadir,'app_labels.csv'))
TalkingData Mobile User Demographics
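The Keras loss defined in the prompt above implements RMSLE, sqrt(mean((log(pred + 1) - log(true + 1))^2)); a quick numpy cross-check of the same formula:

import numpy as np

def rmsle(y_true, y_pred):
    # same formula as the Keras version, on eager numpy values
    return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2))

print(rmsle(np.array([1.0, 10.0, 100.0]), np.array([1.0, 12.0, 90.0])))  # ~0.11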
87,441
def LSTM_model(n_1, input_dim, output_dim): model = Sequential() model.add(LSTM(n_1,input_shape=(1, input_dim), activation='relu')) model.add(Dense(output_dim, activation='relu')) model.compile(loss=root_mean_squared_log_error, optimizer='adam') return model<choose_model_class>
datadir = '../input' gatrain = pd.read_csv(os.path.join(datadir,'gender_age_train.csv'), index_col='device_id') gatest = pd.read_csv(os.path.join(datadir,'gender_age_test.csv'), index_col = 'device_id') phone = pd.read_csv(os.path.join(datadir,'phone_brand_device_model.csv')) phone = phone.drop_duplicates('device_id',keep='first' ).set_index('device_id') events = pd.read_csv(os.path.join(datadir,'events.csv'), parse_dates=['timestamp'], index_col='event_id') appevents = pd.read_csv(os.path.join(datadir,'app_events.csv'), usecols=['event_id','app_id','is_active'], dtype={'is_active':bool}) applabels = pd.read_csv(os.path.join(datadir,'app_labels.csv'))
TalkingData Mobile User Demographics
87,441
K.clear_session() model_cc = LSTM_model(4, X_train_cc.shape[-1], y_train_cc.shape[-1]) model_ft = LSTM_model(4, X_train_ft.shape[-1], y_train_ft.shape[-1]) early_stop_cc = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='min') early_stop_ft = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='min' )<train_model>
gatrain['trainrow'] = np.arange(gatrain.shape[0]) gatest['testrow'] = np.arange(gatest.shape[0] )
TalkingData Mobile User Demographics
87,441
print('Start model training') start_time = time.time() history_cc = model_cc.fit(X_train_cc, y_train_cc, batch_size = 16, epochs = 100,validation_data =(X_val_cc, y_val_cc), verbose = 2, callbacks=[early_stop_cc]) model_cc.save("model_cc.h5") print('Time spent for model training is {} minutes'.format(round(( time.time() -start_time)/60,1)) )<train_model>
brandencoder = LabelEncoder().fit(phone.phone_brand) phone['brand'] = brandencoder.transform(phone['phone_brand']) gatrain['brand'] = phone['brand'] gatest['brand'] = phone['brand'] Xtr_brand = csr_matrix(( np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.brand))) Xte_brand = csr_matrix(( np.ones(gatest.shape[0]), (gatest.testrow, gatest.brand))) print('Brand features: train shape {}, test shape {}'.format(Xtr_brand.shape, Xte_brand.shape))
TalkingData Mobile User Demographics
87,441
print('Start model training') start_time = time.time() history_ft = model_ft.fit(X_train_ft, y_train_ft, batch_size = 16, epochs = 8,validation_data =(X_val_ft, y_val_ft), verbose = 2, callbacks=[early_stop_ft]) model_ft.save("model_ft.h5") print('Time spent for model training is {} minutes'.format(round(( time.time() -start_time)/60,1)) )<predict_on_test>
brandencoder = LabelEncoder().fit(phone.phone_brand) phone['brand'] = brandencoder.transform(phone['phone_brand']) gatrain['brand'] = phone['brand'] gatest['brand'] = phone['brand'] Xtr_brand = csr_matrix(( np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.brand))) Xte_brand = csr_matrix(( np.ones(gatest.shape[0]), (gatest.testrow, gatest.brand))) print('Brand features: train shape {}, test shape {}'.format(Xtr_brand.shape, Xte_brand.shape))
TalkingData Mobile User Demographics
87,441
yhat_val_cc = model_cc.predict(X_val_cc) print(yhat_val_cc[50:70] )<predict_on_test>
m = phone.phone_brand.str.cat(phone.device_model) modelencoder = LabelEncoder().fit(m) phone['model'] = modelencoder.transform(m) gatrain['model'] = phone['model'] gatest['model'] = phone['model'] Xtr_model = csr_matrix(( np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.model))) Xte_model = csr_matrix(( np.ones(gatest.shape[0]), (gatest.testrow, gatest.model))) print('Model features: train shape {}, test shape {}'.format(Xtr_model.shape, Xte_model.shape))
TalkingData Mobile User Demographics
87,441
yhat_val_ft = model_ft.predict(X_val_ft) print(yhat_val_ft[60:70] )<create_dataframe>
appencoder = LabelEncoder().fit(appevents.app_id) appevents['app'] = appencoder.transform(appevents.app_id) napps = len(appencoder.classes_) deviceapps =(appevents.merge(events[['device_id']], how='left',left_on='event_id',right_index=True) .groupby(['device_id','app'])['app'].agg(['size']) .merge(gatrain[['trainrow']], how='left', left_index=True, right_index=True) .merge(gatest[['testrow']], how='left', left_index=True, right_index=True) .reset_index()) deviceapps.head()
TalkingData Mobile User Demographics
87,441
submission = pd.DataFrame() submission['ForecastId'] = test_df['ForecastId'] submission['ConfirmedCases'] = test_df['cc_cases'] submission['Fatalities'] = test_df['ft_cases']<save_to_csv>
d = deviceapps.dropna(subset=['trainrow']) Xtr_app = csr_matrix(( np.ones(d.shape[0]),(d.trainrow, d.app)) , shape=(gatrain.shape[0],napps)) d = deviceapps.dropna(subset=['testrow']) Xte_app = csr_matrix(( np.ones(d.shape[0]),(d.testrow, d.app)) , shape=(gatest.shape[0],napps)) print('Apps data: train shape {}, test shape {}'.format(Xtr_app.shape, Xte_app.shape))
TalkingData Mobile User Demographics
87,441
submission.to_csv("submission.csv",index=False )<import_modules>
applabels = applabels.loc[applabels.app_id.isin(appevents.app_id.unique())] applabels['app'] = appencoder.transform(applabels.app_id) labelencoder = LabelEncoder().fit(applabels.label_id) applabels['label'] = labelencoder.transform(applabels.label_id) nlabels = len(labelencoder.classes_ )
TalkingData Mobile User Demographics
87,441
import numpy as np import pandas as pd import matplotlib.pyplot as plt import os<load_from_csv>
applabels = applabels.loc[applabels.app_id.isin(appevents.app_id.unique())] applabels['app'] = appencoder.transform(applabels.app_id) labelencoder = LabelEncoder().fit(applabels.label_id) applabels['label'] = labelencoder.transform(applabels.label_id) nlabels = len(labelencoder.classes_ )
TalkingData Mobile User Demographics
87,441
data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/train.csv',index_col='Date',parse_dates=True )<categorify>
devicelabels =(deviceapps[['device_id','app']] .merge(applabels[['app','label']]) .groupby(['device_id','label'])['app'].agg(['size']) .merge(gatrain[['trainrow']], how='left', left_index=True, right_index=True) .merge(gatest[['testrow']], how='left', left_index=True, right_index=True) .reset_index()) devicelabels.head()
TalkingData Mobile User Demographics
87,441
data = data.fillna(value='empty' )<feature_engineering>
d = devicelabels.dropna(subset=['trainrow']) Xtr_label = csr_matrix(( np.ones(d.shape[0]),(d.trainrow, d.label)) , shape=(gatrain.shape[0],nlabels)) d = devicelabels.dropna(subset=['testrow']) Xte_label = csr_matrix(( np.ones(d.shape[0]),(d.testrow, d.label)) , shape=(gatest.shape[0],nlabels)) print('Labels data: train shape {}, test shape {}'.format(Xtr_label.shape, Xte_label.shape))
TalkingData Mobile User Demographics
87,441
data['state_with_country'] = data['Province_State'] +'_'+ data['Country_Region']<drop_column>
Xtrain = hstack(( Xtr_brand, Xtr_model, Xtr_app, Xtr_label), format='csr') Xtest = hstack(( Xte_brand, Xte_model, Xte_app, Xte_label), format='csr') print('All features: train shape {}, test shape {}'.format(Xtrain.shape, Xtest.shape))
TalkingData Mobile User Demographics
87,441
data= data.drop(labels=['Province_State','Country_Region','Id'],axis=1 )<define_variables>
Xtrain = hstack(( Xtr_brand, Xtr_model, Xtr_app, Xtr_label), format='csr') Xtest = hstack(( Xte_brand, Xte_model, Xte_app, Xte_label), format='csr') print('All features: train shape {}, test shape {}'.format(Xtrain.shape, Xtest.shape))
TalkingData Mobile User Demographics
87,441
state_i_data =[] for i in state_with_country_name: state_i_data.append(i) <drop_column>
targetencoder = LabelEncoder().fit(gatrain.group) y = targetencoder.transform(gatrain.group) nclasses = len(targetencoder.classes_ )
TalkingData Mobile User Demographics
87,441
for i,j in enumerate(state_with_country_name): state_i_data[i] = data[data['state_with_country'] == j ] state_i_data[i] = state_i_data[i].drop(['state_with_country'],axis=1) <load_from_csv>
def score(clf, random_state = 0): kf = StratifiedKFold(y, n_folds=5, shuffle=True, random_state=random_state) pred = np.zeros(( y.shape[0],nclasses)) for itrain, itest in kf: Xtr, Xte = Xtrain[itrain, :], Xtrain[itest, :] ytr, yte = y[itrain], y[itest] clf.fit(Xtr, ytr) pred[itest,:] = clf.predict_proba(Xte) return log_loss(yte, pred[itest, :]) print("{:.5f}".format(log_loss(yte, pred[itest,:])) , end=' ') print('') return log_loss(y, pred )
TalkingData Mobile User Demographics
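Flattened, the score() completion above hides its control flow: the first return sits inside the cross-validation loop, so only fold one is ever scored and the two print/return lines after it are dead code (presumably a deliberate shortcut to stay inside the kernel time limit). A re-indented sketch of the live path, keeping the era's sklearn.cross_validation API that this kernel imports:

import numpy as np
from sklearn.cross_validation import StratifiedKFold  # old API, matching the imports above
from sklearn.metrics import log_loss

def score_first_fold(clf, Xtrain, y, nclasses, random_state=0):
    kf = StratifiedKFold(y, n_folds=5, shuffle=True, random_state=random_state)
    pred = np.zeros((y.shape[0], nclasses))
    for itrain, itest in kf:
        clf.fit(Xtrain[itrain, :], y[itrain])
        pred[itest, :] = clf.predict_proba(Xtrain[itest, :])
        return log_loss(y[itest], pred[itest, :])  # returns after the first fold only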
87,441
prev_sub = pd.read_csv('/kaggle/input/covid19-forecasting-using-rnn/submission.csv') test_whole = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/test.csv',index_col='Date',parse_dates=True) test_whole = test_whole.fillna(value='empty') <feature_engineering>
score(LogisticRegression(C=0.02))
TalkingData Mobile User Demographics
87,441
test_whole['state_with_country'] = test_whole['Province_State'] +'_'+ test_whole['Country_Region'] test_whole.head()<feature_engineering>
score(LogisticRegression(C=0.02, multi_class='multinomial',solver='lbfgs'))
TalkingData Mobile User Demographics
87,441
prev_sub.index = test_whole.index<prepare_output>
clf = LogisticRegression(C=0.02, multi_class='multinomial',solver='lbfgs') clf.fit(Xtrain, y) pred = pd.DataFrame(clf.predict_proba(Xtest), index = gatest.index, columns=targetencoder.classes_) pred.head()
TalkingData Mobile User Demographics
87,441
<define_variables><EOS>
pred.to_csv('logreg_subm.csv',index=True )
TalkingData Mobile User Demographics
144,515
<drop_column><EOS>
%matplotlib inline
TalkingData Mobile User Demographics
144,515
<SOS> metric: MulticlassLoss Kaggle data source: talkingdata-mobile-user-demographics<load_from_csv>
%matplotlib inline
TalkingData Mobile User Demographics
144,515
test_whole = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/test.csv',index_col='Date',parse_dates=True) test_whole = test_whole.fillna(value='empty' )<feature_engineering>
datadir = '../input' gatrain = pd.read_csv(os.path.join(datadir,'gender_age_train.csv'), index_col='device_id') gatest = pd.read_csv(os.path.join(datadir,'gender_age_test.csv'), index_col = 'device_id') phone = pd.read_csv(os.path.join(datadir,'phone_brand_device_model.csv')) phone = phone.drop_duplicates('device_id',keep='first' ).set_index('device_id') events = pd.read_csv(os.path.join(datadir,'events.csv'), parse_dates=['timestamp'], index_col='event_id') appevents = pd.read_csv(os.path.join(datadir,'app_events.csv'), usecols=['event_id','app_id','is_active'], dtype={'is_active':bool}) applabels = pd.read_csv(os.path.join(datadir,'app_labels.csv'))
TalkingData Mobile User Demographics
144,515
test_whole['state_with_country'] = test_whole['Province_State'] +'_'+ test_whole['Country_Region'] test_whole.head()<define_variables>
datadir = '../input' gatrain = pd.read_csv(os.path.join(datadir,'gender_age_train.csv'), index_col='device_id') gatest = pd.read_csv(os.path.join(datadir,'gender_age_test.csv'), index_col = 'device_id') phone = pd.read_csv(os.path.join(datadir,'phone_brand_device_model.csv')) phone = phone.drop_duplicates('device_id',keep='first' ).set_index('device_id') events = pd.read_csv(os.path.join(datadir,'events.csv'), parse_dates=['timestamp'], index_col='event_id') appevents = pd.read_csv(os.path.join(datadir,'app_events.csv'), usecols=['event_id','app_id','is_active'], dtype={'is_active':bool}) applabels = pd.read_csv(os.path.join(datadir,'app_labels.csv'))
TalkingData Mobile User Demographics
144,515
state_i_data_for_test=[] for i in state_with_country_name_for_test: state_i_data_for_test.append(i) <feature_engineering>
gatrain['trainrow'] = np.arange(gatrain.shape[0]) gatest['testrow'] = np.arange(gatest.shape[0] )
TalkingData Mobile User Demographics
144,515
for i,j in enumerate(state_with_country_name_for_test): state_i_data_for_test[i] = test_whole[test_whole['state_with_country'] == j ] state_i_data_for_test[i] = state_i_data_for_test[i].iloc[21:] state_i_data_for_test[i] = state_i_data_for_test[i].drop(['ForecastId','state_with_country','Province_State','Country_Region'],axis=1) state_i_data_for_test.append(state_i_data_for_test[i] )<import_modules>
brandencoder = LabelEncoder().fit(phone.phone_brand) phone['brand'] = brandencoder.transform(phone['phone_brand']) gatrain['brand'] = phone['brand'] gatest['brand'] = phone['brand'] Xtr_brand = csr_matrix(( np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.brand))) Xte_brand = csr_matrix(( np.ones(gatest.shape[0]), (gatest.testrow, gatest.brand))) print('Brand features: train shape {}, test shape {}'.format(Xtr_brand.shape, Xte_brand.shape))
TalkingData Mobile User Demographics
144,515
from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense,LSTM from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator<normalization>
brandencoder = LabelEncoder().fit(phone.phone_brand) phone['brand'] = brandencoder.transform(phone['phone_brand']) gatrain['brand'] = phone['brand'] gatest['brand'] = phone['brand'] Xtr_brand = csr_matrix(( np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.brand))) Xte_brand = csr_matrix(( np.ones(gatest.shape[0]), (gatest.testrow, gatest.brand))) print('Brand features: train shape {}, test shape {}'.format(Xtr_brand.shape, Xte_brand.shape))
TalkingData Mobile User Demographics
144,515
result = [] full_scaler = MinMaxScaler()<train_on_grid>
m = phone.phone_brand.str.cat(phone.device_model) modelencoder = LabelEncoder().fit(m) phone['model'] = modelencoder.transform(m) gatrain['model'] = phone['model'] gatest['model'] = phone['model'] Xtr_model = csr_matrix(( np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.model))) Xte_model = csr_matrix(( np.ones(gatest.shape[0]), (gatest.testrow, gatest.model))) print('Model features: train shape {}, test shape {}'.format(Xtr_model.shape, Xte_model.shape))
TalkingData Mobile User Demographics
144,515
for i in range(len(state_with_country_name_for_test)) : scaled_full_data = full_scaler.fit_transform(state_i_data[i]) length = 1 batch_size = 1 generator = TimeseriesGenerator(scaled_full_data, scaled_full_data, length=length, batch_size=1) model = Sequential() model.add(LSTM(96,input_shape=(length,scaled_full_data.shape[1]))) model.add(Dense(scaled_full_data.shape[1])) model.compile(optimizer='adam', loss='mse') model.fit_generator(generator,epochs=6) n_features = scaled_full_data.shape[1] test_predictions = [] first_eval_batch = scaled_full_data[-length:] current_batch = first_eval_batch.reshape(( 1, length, n_features)) for j in range(len(state_i_data_for_test[i])) : current_pred = model.predict(current_batch)[0] test_predictions.append(current_pred) current_batch = np.append(current_batch[:,1:,:],[[current_pred]],axis=1) true_predictions = full_scaler.inverse_transform(test_predictions) true_predictions = true_predictions.round() true_predictions = pd.DataFrame(data=true_predictions,columns=state_i_data[1].columns) result.append(true_predictions) print('count:-',i) <prepare_output>
appencoder = LabelEncoder().fit(appevents.app_id) appevents['app'] = appencoder.transform(appevents.app_id) napps = len(appencoder.classes_) deviceapps =(appevents.merge(events[['device_id']], how='left',left_on='event_id',right_index=True) .groupby(['device_id','app'])['app'].agg(['size']) .merge(gatrain[['trainrow']], how='left', left_index=True, right_index=True) .merge(gatest[['testrow']], how='left', left_index=True, right_index=True) .reset_index()) deviceapps.head()
TalkingData Mobile User Demographics
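The prompt above ends with a recursive forecast: predict one step, slide the prediction into the input window, repeat. The core loop, isolated as a sketch (the fitted model and window shapes are assumed):

import numpy as np

def roll_forecast(model, last_window, n_steps):
    # last_window: (length, n_features), scaled the same way as the training data
    preds = []
    batch = last_window[np.newaxis, ...]               # -> (1, length, n_features)
    for _ in range(n_steps):
        step = model.predict(batch)[0]                 # one-step-ahead prediction
        preds.append(step)
        batch = np.append(batch[:, 1:, :], [[step]], axis=1)  # slide the window
    return np.array(preds)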
144,515
prediction = pd.DataFrame(data= state_i_data[0].iloc[-21:] ,columns=['ConfirmedCases','Fatalities']) prediction = prediction.append(result[0] )<prepare_output>
d = deviceapps.dropna(subset=['trainrow']) Xtr_app = csr_matrix(( np.ones(d.shape[0]),(d.trainrow, d.app)) , shape=(gatrain.shape[0],napps)) d = deviceapps.dropna(subset=['testrow']) Xte_app = csr_matrix(( np.ones(d.shape[0]),(d.testrow, d.app)) , shape=(gatest.shape[0],napps)) print('Apps data: train shape {}, test shape {}'.format(Xtr_app.shape, Xte_app.shape))
TalkingData Mobile User Demographics
144,515
for i in range(1,len(result)) : prediction = prediction.append(state_i_data[i].iloc[-21:]) prediction = prediction.append(result[i] )<define_variables>
applabels = applabels.loc[applabels.app_id.isin(appevents.app_id.unique())] applabels['app'] = appencoder.transform(applabels.app_id) labelencoder = LabelEncoder().fit(applabels.label_id) applabels['label'] = labelencoder.transform(applabels.label_id) nlabels = len(labelencoder.classes_ )
TalkingData Mobile User Demographics
144,515
prediction.index = range(0,len(prediction))<load_from_csv>
applabels = applabels.loc[applabels.app_id.isin(appevents.app_id.unique())] applabels['app'] = appencoder.transform(applabels.app_id) labelencoder = LabelEncoder().fit(applabels.label_id) applabels['label'] = labelencoder.transform(applabels.label_id) nlabels = len(labelencoder.classes_ )
TalkingData Mobile User Demographics
144,515
sub_format = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/submission.csv' )<drop_column>
devicelabels =(deviceapps[['device_id','app']] .merge(applabels[['app','label']]) .groupby(['device_id','label'])['app'].agg(['size']) .merge(gatrain[['trainrow']], how='left', left_index=True, right_index=True) .merge(gatest[['testrow']], how='left', left_index=True, right_index=True) .reset_index()) devicelabels.head()
TalkingData Mobile User Demographics
144,515
sub_format = sub_format['ForecastId']<concatenate>
d = devicelabels.dropna(subset=['trainrow']) Xtr_label = csr_matrix(( np.ones(d.shape[0]),(d.trainrow, d.label)) , shape=(gatrain.shape[0],nlabels)) d = devicelabels.dropna(subset=['testrow']) Xte_label = csr_matrix(( np.ones(d.shape[0]),(d.testrow, d.label)) , shape=(gatest.shape[0],nlabels)) print('Labels data: train shape {}, test shape {}'.format(Xtr_label.shape, Xte_label.shape))
TalkingData Mobile User Demographics
144,515
final = pd.concat([sub_format,prediction],axis=1 )<save_to_csv>
Xtrain = hstack(( Xtr_brand, Xtr_model, Xtr_app, Xtr_label), format='csr') Xtest = hstack(( Xte_brand, Xte_model, Xte_app, Xte_label), format='csr') print('All features: train shape {}, test shape {}'.format(Xtrain.shape, Xtest.shape))
TalkingData Mobile User Demographics
144,515
final.to_csv('submission.csv',index=False )<load_from_csv>
Xtrain = hstack(( Xtr_brand, Xtr_model, Xtr_app, Xtr_label), format='csr') Xtest = hstack(( Xte_brand, Xte_model, Xte_app, Xte_label), format='csr') print('All features: train shape {}, test shape {}'.format(Xtrain.shape, Xtest.shape))
TalkingData Mobile User Demographics
144,515
base_folder = '/kaggle/input/' data_base = base_folder + 'covid19-global-forecasting-week-2/' df = pd.read_csv(data_base + 'train.csv') df.rename(columns={'Province_State': 'Province/State', 'Country_Region': 'Country/Region'}, inplace=True) df['Province/State'].fillna('entire country', inplace=True) df<feature_engineering>
targetencoder = LabelEncoder().fit(gatrain.gender) y = targetencoder.transform(gatrain.gender) nclasses = len(targetencoder.classes_ )
TalkingData Mobile User Demographics
144,515
def add_extra_features_from_previous_days(data_fr, tail_size=5): cols_tmp = [] col_prefix = 'PreviousDay' for i in range(0, tail_size): col_cc = '{}-{}ConfirmedCases'.format(col_prefix, i) col_f = '{}-{}Fatalities'.format(col_prefix, i) data_fr[col_cc] = data_fr.groupby(['Country/Region', 'Province/State'])['ConfirmedCases'].shift(periods=i+1, fill_value=0) data_fr[col_f] = data_fr.groupby(['Country/Region', 'Province/State'])['Fatalities'].shift(periods=i+1, fill_value=0) data_fr[col_cc + 'Delta'] = data_fr.groupby(['Country/Region', 'Province/State'])[col_cc].diff().fillna(0) data_fr[col_f + 'Delta'] = data_fr.groupby(['Country/Region', 'Province/State'])[col_f].diff().fillna(0) cols_tmp += [col_cc, col_f, col_cc + 'Delta', col_f + 'Delta'] return cols_tmp TAIL = 5 previous_days_cols = add_extra_features_from_previous_days(df, TAIL) df<data_type_conversions>
clf = LogisticRegression(C=0.08) clf.fit(Xtrain[70001:], y[70001:]) pred = pd.DataFrame(clf.predict_proba(Xtrain[70001:]), index=gatrain.iloc[70001:].index, columns=targetencoder.classes_) pred.head()
TalkingData Mobile User Demographics
144,515
<feature_engineering><EOS>
pred.to_csv('test_gender.csv',index=True )
TalkingData Mobile User Demographics
86,315
<load_from_csv><EOS>
%matplotlib inline
TalkingData Mobile User Demographics
86,315
<SOS> metric: MulticlassLoss Kaggle data source: talkingdata-mobile-user-demographics<define_variables>
%matplotlib inline
TalkingData Mobile User Demographics
86,315
countries_to_replace = [ ('Czech Republic', 'Czechia'), ('United States of America', 'US'), ("Côte d'Ivoire(Ivory Coast)", "Côte d'Ivoire"), ('Korea(South)', 'Korea, South'), ('Swaziland', 'Eswatini'), ('Gambia', 'The Gambia'), ('Myanmar(Burma)', 'Myanmar'), ('East Timor', 'Timor-Leste'), ('Macedonia', 'North Macedonia'), ('Cape Verde', 'Cabo Verde'), ('Congo(Republic)', 'Congo(Brazzaville)'), ('Congo(Democratic Republic)', 'Congo(Kinshasa)'), ('Palestinian Territories', 'State of Palestine'), ('Bahamas', 'The Bahamas'), ('United Kingdom of Great Britain and Northern Ireland', 'United Kingdom'), ('Vatican City', 'Holy See') ] csv_dir = base_folder + 'worldbymap/' files = [ 'labor_force', 'death_rate', 'air_traffic_passengers', 'hospital_bed_density', 'obesity', 'old_people', 'physicians_density' ] wbm = {} for f in files: wbm[f] = pd.read_csv(csv_dir + f + '.csv', delimiter=';', decimal=',', na_values='N.A.') for ctr in countries_to_replace: wbm[f] = wbm[f].replace(ctr[0], ctr[1]) wbm[files[0]]
datadir = '../input' gatrain = pd.read_csv(os.path.join(datadir,'gender_age_train.csv'), index_col='device_id') gatest = pd.read_csv(os.path.join(datadir,'gender_age_test.csv'), index_col = 'device_id') phone = pd.read_csv(os.path.join(datadir,'phone_brand_device_model.csv')) phone = phone.drop_duplicates('device_id',keep='first' ).set_index('device_id') events = pd.read_csv(os.path.join(datadir,'events.csv'), parse_dates=['timestamp'], index_col='event_id') appevents = pd.read_csv(os.path.join(datadir,'app_events.csv'), usecols=['event_id','app_id','is_active'], dtype={'is_active':bool}) applabels = pd.read_csv(os.path.join(datadir,'app_labels.csv'))
TalkingData Mobile User Demographics
86,315
df_add = pd.DataFrame() for dataset in wbm.keys() : if df_add.shape ==(0, 0): df_add = wbm[dataset].copy() else: df_add = df_add.merge(wbm[dataset], on='Country', how='left') df_add.rename(columns={"Country": "Country/Region"}, inplace=True) df_add<merge>
datadir = '../input' gatrain = pd.read_csv(os.path.join(datadir,'gender_age_train.csv'), index_col='device_id') gatest = pd.read_csv(os.path.join(datadir,'gender_age_test.csv'), index_col = 'device_id') phone = pd.read_csv(os.path.join(datadir,'phone_brand_device_model.csv')) phone = phone.drop_duplicates('device_id',keep='first' ).set_index('device_id') events = pd.read_csv(os.path.join(datadir,'events.csv'), parse_dates=['timestamp'], index_col='event_id') appevents = pd.read_csv(os.path.join(datadir,'app_events.csv'), usecols=['event_id','app_id','is_active'], dtype={'is_active':bool}) applabels = pd.read_csv(os.path.join(datadir,'app_labels.csv'))
TalkingData Mobile User Demographics
86,315
df_external = pd.merge(df_population, df_add, on='Country/Region', how='left') <merge>
gatrain['trainrow'] = np.arange(gatrain.shape[0]) gatest['testrow'] = np.arange(gatest.shape[0] )
TalkingData Mobile User Demographics
86,315
df_pop = pd.merge(df, df_external, on=['Country/Region', 'Province/State'], how='left') df_pop<define_variables>
brandencoder = LabelEncoder().fit(phone.phone_brand) phone['brand'] = brandencoder.transform(phone['phone_brand']) gatrain['brand'] = phone['brand'] gatest['brand'] = phone['brand'] Xtr_brand = csr_matrix(( np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.brand))) Xte_brand = csr_matrix(( np.ones(gatest.shape[0]), (gatest.testrow, gatest.brand))) print('Brand features: train shape {}, test shape {}'.format(Xtr_brand.shape, Xte_brand.shape))
TalkingData Mobile User Demographics
86,315
ext_cols = ['LaborForceTotal', 'LaborForcePerCapita ', 'DeathRate', 'AirTrafficPassengersTotal', 'AirTrafficPassengersPerCapita', 'HospitalBedDensity', 'Obesity', 'OldPeople', 'PhysiciansDensity'] ext_cols = ['LaborForcePerCapita ', 'DeathRate', 'AirTrafficPassengersPerCapita', 'HospitalBedDensity', 'Obesity', 'OldPeople', 'PhysiciansDensity'] pop_cols = ['DayNum', 'RealDayNum', 'Yearly change', 'Density', 'Land Area', 'Med.Age', 'Urban Pop', 'Population'] pop_cols = ['DayNum', 'Med.Age', 'Urban Pop', 'Density'] model_x_columns_without_dummies = pop_cols + ext_cols + previous_days_cols + special_cols model_x_columns = model_x_columns_without_dummies + continent_columns def rmsle(ytrue, ypred): return np.sqrt(mean_squared_log_error(ytrue, ypred)) def mae(ytrue, ypred): return mean_absolute_error(ytrue, ypred) def analyse(data_y_test, data_y_pred): chart_data = pd.DataFrame({'x1': data_y_test.flatten() , 'x2': data_y_pred.flatten() , 'y': np.abs(data_y_test.flatten() -data_y_pred.flatten() ).flatten() }) sns.scatterplot(x='x1', y='y', data=chart_data, color='black') sns.scatterplot(x='x2', y='y', data=chart_data, color='red') print('RMSLE: {}'.format(round(rmsle(data_y_test, data_y_pred), 6))) def analyse2(tr_y, tr_pred, data_y_test, data_y_pred): chart_data0 = pd.DataFrame({ 'x00': tr_y.flatten() , 'x01': tr_pred.flatten() , 'y0': np.abs(tr_y.flatten() -tr_pred.flatten() ).flatten() }) chart_data1 = pd.DataFrame({ 'x10': data_y_test.flatten() , 'x11': data_y_pred.flatten() , 'y1': np.abs(data_y_test.flatten() -data_y_pred.flatten() ).flatten() }) fig, ax =plt.subplots(1,2) sns.scatterplot(x='x00', y='y0', data=chart_data0, color='blue', ax=ax[0]) sns.scatterplot(x='x01', y='y0', data=chart_data0, color='yellow', ax=ax[0]) sns.scatterplot(x='x10', y='y1', data=chart_data1, color='black', ax=ax[1]) sns.scatterplot(x='x11', y='y1', data=chart_data1, color='red', ax=ax[1]) print('RMSLE train: {}'.format(round(rmsle(tr_y, tr_pred), 6))) print('RMSLE test: {}'.format(round(rmsle(data_y_test, data_y_pred), 6))) def prepare_data(df, what_to_predict, test_size=0.3, dropna=False): df_tmp = df.copy() if dropna: df_tmp.dropna(inplace=True) data_X = df_tmp[model_x_columns] data_y = np.log1p(df_tmp[[what_to_predict]].values.flatten()) return train_test_split(data_X, data_y, test_size=test_size, random_state=42) def predict_output(input_data, model): df_final = input_data[model_x_columns].copy() y_pred = model.predict(df_final) return y_pred def expm1_relu(y): tmp = np.expm1(y) tmp[tmp<0]=0 return np.around(tmp )<train_model>
brandencoder = LabelEncoder().fit(phone.phone_brand) phone['brand'] = brandencoder.transform(phone['phone_brand']) gatrain['brand'] = phone['brand'] gatest['brand'] = phone['brand'] Xtr_brand = csr_matrix(( np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.brand))) Xte_brand = csr_matrix(( np.ones(gatest.shape[0]), (gatest.testrow, gatest.brand))) print('Brand features: train shape {}, test shape {}'.format(Xtr_brand.shape, Xte_brand.shape))
TalkingData Mobile User Demographics
86,315
data_X_tr, data_X_test, data_y_tr, data_y_test = prepare_data(df_pop, 'Fatalities', test_size=0.25, dropna=True) model_f = XGBRegressor(learning_rate=0.1, n_estimators=50, max_depth=3, min_child_weight=0, gamma=0, subsample=0.65, colsample_bytree=0.65, reg_alpha=0, reg_lambda=0, objective='reg:squarederror', scale_pos_weight=1, seed=37) model_f.fit(data_X_tr, data_y_tr) tr_pred = predict_output(data_X_tr, model_f) data_y_pred = predict_output(data_X_test, model_f) analyse2(expm1_relu(data_y_tr), expm1_relu(tr_pred), expm1_relu(data_y_test), expm1_relu(data_y_pred))<split>
m = phone.phone_brand.str.cat(phone.device_model) modelencoder = LabelEncoder().fit(m) phone['model'] = modelencoder.transform(m) gatrain['model'] = phone['model'] gatest['model'] = phone['model'] Xtr_model = csr_matrix(( np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.model))) Xte_model = csr_matrix(( np.ones(gatest.shape[0]), (gatest.testrow, gatest.model))) print('Model features: train shape {}, test shape {}'.format(Xtr_model.shape, Xte_model.shape))
TalkingData Mobile User Demographics
86,315
model_cc = XGBRegressor(learning_rate=0.08, n_estimators=400, max_depth=6, min_child_weight=0, gamma=0, subsample=0.7, colsample_bytree=0.78, reg_alpha=0, reg_lambda=28, objective='reg:squarederror', scale_pos_weight=1, seed=37) data_X_tr, data_X_test, data_y_tr, data_y_test = prepare_data(df_pop, 'ConfirmedCases', test_size=0.3, dropna=True) hist = model_cc.fit(data_X_tr, data_y_tr) tr_pred = predict_output(data_X_tr, model_cc) data_y_pred = predict_output(data_X_test, model_cc) analyse2(expm1_relu(data_y_tr), expm1_relu(tr_pred), expm1_relu(data_y_test), expm1_relu(data_y_pred))<feature_engineering>
appencoder = LabelEncoder().fit(appevents.app_id) appevents['app'] = appencoder.transform(appevents.app_id) napps = len(appencoder.classes_) deviceapps =(appevents.merge(events[['device_id']], how='left',left_on='event_id',right_index=True) .groupby(['device_id','app'])['app'].agg(['size']) .merge(gatrain[['trainrow']], how='left', left_index=True, right_index=True) .merge(gatest[['testrow']], how='left', left_index=True, right_index=True) .reset_index()) deviceapps.head()
TalkingData Mobile User Demographics
86,315
df_test = pd.read_csv(data_base + 'test.csv') df_test.rename(columns={'Province_State': 'Province/State', 'Country_Region': 'Country/Region'}, inplace=True) df_test['Province/State'].fillna('entire country', inplace=True) df_test['DayNum'] =(df_test['Date'].astype('datetime64[ns]')- day_zero ).apply(lambda x: int(x.days)) for c1, c2 in zip(special_cols1, special_cols2): zero_days = pd.DataFrame(df.groupby(['Country/Region', 'Province/State', c1] ).size().reset_index() [['Country/Region', 'Province/State', c1]]) zero_days.drop_duplicates(subset=['Country/Region', 'Province/State'], keep='last', inplace=True) df_test = df_test.merge(zero_days, on=['Country/Region', 'Province/State'], how='left') df_test<merge>
d = deviceapps.dropna(subset=['trainrow']) Xtr_app = csr_matrix(( np.ones(d.shape[0]),(d.trainrow, d.app)) , shape=(gatrain.shape[0],napps)) d = deviceapps.dropna(subset=['testrow']) Xte_app = csr_matrix(( np.ones(d.shape[0]),(d.testrow, d.app)) , shape=(gatest.shape[0],napps)) print('Apps data: train shape {}, test shape {}'.format(Xtr_app.shape, Xte_app.shape))
TalkingData Mobile User Demographics
86,315
for c1, c2 in zip(special_cols1, special_cols2): real_day_num = df_test['DayNum'] - df_test[c1] + 1 df_test[c2] = real_day_num - real_day_num.where(real_day_num<0 ).fillna(0) df_test_pop = pd.merge(df_test, df_external, on=['Country/Region', 'Province/State'], how='left') df_test_pop.fillna(df_test_pop.mean() , inplace=True) df_test_pop<merge>
applabels = applabels.loc[applabels.app_id.isin(appevents.app_id.unique())] applabels['app'] = appencoder.transform(applabels.app_id) labelencoder = LabelEncoder().fit(applabels.label_id) applabels['label'] = labelencoder.transform(applabels.label_id) nlabels = len(labelencoder.classes_ )
TalkingData Mobile User Demographics
86,315
output_columns = ['ConfirmedCases', 'Fatalities'] tmp_output_columns = ['ConfirmedCases_y', 'Fatalities_y'] last_training_day = df['DayNum'].max() first_test_day = df_test['DayNum'].min() train_test_keys = ['Country/Region', 'Province/State', 'DayNum'] df_test_pop = pd.merge(df_test_pop, df[df['DayNum']>=first_test_day][train_test_keys+output_columns], on=train_test_keys, how='left') df_test_pop[output_columns] = df_test_pop[output_columns].fillna(0 ).copy() df_test_pop<predict_on_test>
applabels = applabels.loc[applabels.app_id.isin(appevents.app_id.unique())] applabels['app'] = appencoder.transform(applabels.app_id) labelencoder = LabelEncoder().fit(applabels.label_id) applabels['label'] = labelencoder.transform(applabels.label_id) nlabels = len(labelencoder.classes_ )
TalkingData Mobile User Demographics
86,315
last_test_day = df_test['DayNum'].max()
for day in range(last_training_day + 1, last_test_day + 1):
    print('predicting day {} ({} to go)'.format(day, last_test_day - day))
    up_to_current_day = df_test_pop.where(df_test['DayNum'] <= day).dropna(subset=['Country/Region'])
    previous_days_columns = add_extra_features_from_previous_days(up_to_current_day, TAIL)
    up_to_current_day['ConfirmedCases'] = expm1_relu(predict_output(up_to_current_day[model_x_columns], model_cc))
    up_to_current_day['Fatalities'] = expm1_relu(predict_output(up_to_current_day[model_x_columns], model_f))
    tmp_dataset = up_to_current_day[up_to_current_day['DayNum'] == day][train_test_keys + output_columns]
    df_test_pop = pd.merge(df_test_pop, tmp_dataset, on=train_test_keys, how='left', suffixes=('', '_y'))
    df_test_pop[tmp_output_columns] = df_test_pop[tmp_output_columns].fillna(0).copy()
    df_test_pop['ConfirmedCases'] += df_test_pop['ConfirmedCases_y']
    df_test_pop['Fatalities'] += df_test_pop['Fatalities_y']
    df_test_pop.drop(columns=tmp_output_columns, inplace=True)<filter>
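The loop above is a recursive multi-step forecast: each day's predictions are merged back into df_test_pop so that add_extra_features_from_previous_days can derive lag features for the next day.

# Outline of the recursion, for each test day d beyond the training horizon:
#   1. take all rows with DayNum <= d (known history plus earlier predictions)
#   2. rebuild the lag features over that window
#   3. predict day d and merge the result back into df_test_pop,
#      so the day d+1 iteration sees day d predictions as history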
devicelabels = (deviceapps[['device_id', 'app']]
                .merge(applabels[['app', 'label']])
                .groupby(['device_id', 'label'])['app'].agg(['size'])
                .merge(gatrain[['trainrow']], how='left', left_index=True, right_index=True)
                .merge(gatest[['testrow']], how='left', left_index=True, right_index=True)
                .reset_index())
devicelabels.head()
TalkingData Mobile User Demographics
86,315
up_to_current_day[up_to_current_day.isnull().any(axis=1)]<save_to_csv>
d = devicelabels.dropna(subset=['trainrow'])
Xtr_label = csr_matrix((np.ones(d.shape[0]), (d.trainrow, d.label)),
                       shape=(gatrain.shape[0], nlabels))
d = devicelabels.dropna(subset=['testrow'])
Xte_label = csr_matrix((np.ones(d.shape[0]), (d.testrow, d.label)),
                       shape=(gatest.shape[0], nlabels))
print('Labels data: train shape {}, test shape {}'.format(Xtr_label.shape, Xte_label.shape))
TalkingData Mobile User Demographics
86,315
submission_columns = ['ForecastId', 'ConfirmedCases', 'Fatalities']
df_test_pop[submission_columns].to_csv('submission.csv', index=False)
df_test_pop<import_modules>
Xtrain = hstack((Xtr_brand, Xtr_model, Xtr_app, Xtr_label), format='csr')
Xtest = hstack((Xte_brand, Xte_model, Xte_app, Xte_label), format='csr')
print('All features: train shape {}, test shape {}'.format(Xtrain.shape, Xtest.shape))
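hstack with format='csr' concatenates the four sparse blocks column-wise, so the feature width is the sum of the brand, model, app and label vocabularies:

# Xtrain.shape[1] == Xtr_brand.shape[1] + Xtr_model.shape[1] \
#                  + Xtr_app.shape[1] + Xtr_label.shape[1]
# format='csr' keeps the result efficiently row-sliceable for CV folds.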
TalkingData Mobile User Demographics
86,315
plotly.offline.init_notebook_mode()
%matplotlib inline
def RMSLE(pred, actual):
    return np.sqrt(np.mean(np.power((np.log(pred + 1) - np.log(actual + 1)), 2)))<load_from_csv>
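RMSLE as defined here is just RMSE on log1p-transformed values, so over- and under-prediction by the same ratio are penalized equally. A quick toy check (assumes the cell above has run):

p, a = np.array([100.0]), np.array([10.0])
assert np.isclose(RMSLE(p, a), RMSLE(a, p))  # symmetric in log space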
TalkingData Mobile User Demographics
86,315
pd.set_option('mode.chained_assignment', None)
test = pd.read_csv("../input/covid19-global-forecasting-week-2/test.csv")
train = pd.read_csv("../input/covid19-global-forecasting-week-2/train.csv")
train['Province_State'].fillna('', inplace=True)
test['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region', 'Province_State', 'Date'])
test = test.sort_values(['Country_Region', 'Province_State', 'Date'])<feature_engineering>
targetencoder = LabelEncoder().fit(gatrain.group)
y = targetencoder.transform(gatrain.group)
nclasses = len(targetencoder.classes_)
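A note on the target encoding:

# y holds integer class ids in [0, nclasses); targetencoder.classes_ keeps the
# original gender/age group labels in matching order, which is reused later as
# the column names of the prediction DataFrame.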
TalkingData Mobile User Demographics
86,315
FirstDate = train.groupby('Country_Region').min()['Date'].unique()[0]
train['Last Confirm'] = train['ConfirmedCases'].shift(1)
while train[(train['Last Confirm'] > train['ConfirmedCases']) & (train['Date'] > FirstDate)].shape[0] > 0:
    train['Last Confirm'] = train['ConfirmedCases'].shift(1)
    train['Last Fatalities'] = train['Fatalities'].shift(1)
    train.loc[(train['Last Confirm'] > train['ConfirmedCases']) & (train['Date'] > FirstDate), 'ConfirmedCases'] = \
        train.loc[(train['Last Confirm'] > train['ConfirmedCases']) & (train['Date'] > FirstDate), 'Last Confirm']
    train.loc[(train['Last Fatalities'] > train['Fatalities']) & (train['Date'] > FirstDate), 'Fatalities'] = \
        train.loc[(train['Last Fatalities'] > train['Fatalities']) & (train['Date'] > FirstDate), 'Last Fatalities']
train['Last Confirm'] = train['ConfirmedCases'].shift(1)
train['Last Fatalities'] = train['Fatalities'].shift(1)<feature_engineering>
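The while loop repairs non-monotonic cumulative counts by repeatedly copying the previous value forward. Since train is already sorted by region and date, a vectorized equivalent (a sketch, not the author's code) is a grouped cummax:

keys = ['Country_Region', 'Province_State']
train['ConfirmedCases'] = train.groupby(keys)['ConfirmedCases'].cummax()
train['Fatalities'] = train.groupby(keys)['Fatalities'].cummax()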
import xgboost as xgb
TalkingData Mobile User Demographics
86,315
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() with tqdm(total=len(train['Country_Region'].unique())) as pbar: for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) feature_use = X_pred.columns[0] for i in range(X_pred.shape[1] - 1,0,-1): if(X_pred.iloc[0,i] > 0): feature_use = X_pred.columns[i] break idx = X_train[X_train[feature_use] == 0].shape[0] adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] adjusted_X_pred = X_pred[feature_use].values.reshape(-1, 1) model = make_pipeline(PolynomialFeatures(2), BayesianRidge()) model.fit(adjusted_X_train,adjusted_y_train_confirmed) y_hat_confirmed = model.predict(adjusted_X_pred) model.fit(adjusted_X_train,adjusted_y_train_fatalities) y_hat_fatalities = model.predict(adjusted_X_pred) pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data) pbar.update(1) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_1 = df_val.copy()<compute_test_metric>
dtrain = xgb.DMatrix(Xtrain, y)
TalkingData Mobile User Demographics
86,315
RMSLE(df_val[df_val['ConfirmedCases'].notnull()]['ConfirmedCases'].values,
      df_val[df_val['ConfirmedCases'].notnull()]['ConfirmedCases_hat'].values)<compute_test_metric>
params = {
    "eta": 0.1,
    "booster": "gblinear",
    "objective": "multi:softprob",
    "alpha": 4,
    "lambda": 0,
    "silent": 1,
    "seed": 1233,
    "num_class": 12,
    "eval_metric": "mlogloss"
}
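With booster='gblinear' this is effectively a regularized multinomial logistic regression on the sparse one-hot features rather than a tree ensemble:

# eta=0.1  : learning rate per boosting round
# alpha=4  : L1 penalty, pushes weights of rare brands/apps/labels to zero
# lambda=0 : no L2 penalty
# multi:softprob with num_class=12 outputs a full probability vector per
# device, matching the competition's multiclass logloss metric.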
TalkingData Mobile User Demographics
86,315
RMSLE(df_val[df_val['Fatalities'].notnull()]['Fatalities'].values,
      df_val[df_val['Fatalities'].notnull()]['Fatalities_hat'].values)<groupby>
xgb.cv(params, dtrain, num_boost_round=50, maximize=False)
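xgb.cv returns a DataFrame of per-round train/test mlogloss; a common follow-up (a sketch; column names follow xgboost's 'test-<metric>-mean' convention) is to pick the round with the lowest validation loss:

cvres = xgb.cv(params, dtrain, num_boost_round=50)
best_round = int(cvres['test-mlogloss-mean'].idxmin()) + 1
print(best_round, cvres['test-mlogloss-mean'].min())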
TalkingData Mobile User Demographics
86,315
country = "Vietnam" df_val = df_val_1 df_val[df_val['Country_Region'] == country].groupby(['Date','Country_Region'] ).sum().reset_index()<save_model>
model = RandomForestClassifier()
TalkingData Mobile User Demographics
86,315
animator.save('confirm_animation.gif', writer='imagemagick', fps=2)
display(Image(url='confirm_animation.gif'))<feature_engineering>
model = xgb.train(params, dtrain, num_boost_round=25)
TalkingData Mobile User Demographics
86,315
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() with tqdm(total=len(train['Country_Region'].unique())) as pbar: for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : with warnings.catch_warnings() : warnings.filterwarnings("ignore") df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) feature_use = X_pred.columns[0] for i in range(X_pred.shape[1] - 1,0,-1): if(X_pred.iloc[0,i] > 0): feature_use = X_pred.columns[i] break idx = X_train[X_train[feature_use] == 0].shape[0] adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max() min_test_date = pred_data['Date'].min() model = ExponentialSmoothing(adjusted_y_train_confirmed, trend = 'additive' ).fit() y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0) model = ExponentialSmoothing(adjusted_y_train_fatalities, trend = 'additive' ).fit() y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0) pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data) pbar.update(1) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_2 = df_val.copy()<feature_engineering>
dtest = xgb.DMatrix(Xtest)
TalkingData Mobile User Demographics
86,315
feature_day = [1,20,50,100,200,500,1000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_denta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature] pred_data_all = pd.DataFrame() with tqdm(total=len(train['Country_Region'].unique())) as pbar: for country in train['Country_Region'].unique() : for province in train[(train['Country_Region'] == country)]['Province_State'].unique() : with warnings.catch_warnings() : warnings.filterwarnings("ignore") df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)] df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] X_train = CreateInput(df_train) y_train_confirmed = df_train['ConfirmedCases'].ravel() y_train_fatalities = df_train['Fatalities'].ravel() X_pred = CreateInput(df_test) feature_use = X_pred.columns[0] for i in range(X_pred.shape[1] - 1,0,-1): if(X_pred.iloc[0,i] > 0): feature_use = X_pred.columns[i] break idx = X_train[X_train[feature_use] == 0].shape[0] adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1) adjusted_y_train_confirmed = y_train_confirmed[idx:] adjusted_y_train_fatalities = y_train_fatalities[idx:] idx = X_pred[X_pred[feature_use] == 0].shape[0] adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1) pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)] max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max() min_test_date = pred_data['Date'].min() model = SARIMAX(adjusted_y_train_confirmed, order=(1,1,0), measurement_error=True ).fit(disp=False) y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0) model = SARIMAX(adjusted_y_train_fatalities, order=(1,1,0), measurement_error=True ).fit(disp=False) y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0]) y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0) pred_data['ConfirmedCases_hat'] = y_hat_confirmed pred_data['Fatalities_hat'] = y_hat_fatalities pred_data_all = pred_data_all.append(pred_data) pbar.update(1) df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left') df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0 df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0 df_val_3 = df_val.copy()<compute_test_metric>
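At this point three per-region baselines have been produced for comparison:

# df_val_1 - degree-2 PolynomialFeatures + BayesianRidge on days-since-N-cases
# df_val_2 - ExponentialSmoothing with additive trend (Holt's linear method)
# df_val_3 - SARIMAX(1,1,0) with measurement error
# All three clip negative forecasts to zero before evaluation.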
pred = pd.DataFrame(model.predict(dtest), index=gatest.index, columns=targetencoder.classes_)
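Each row of pred is the 12-class probability vector for one test device; with multi:softprob the rows sum to 1 by construction.

# sanity check: pred.sum(axis=1) should be ~1.0 for every device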
TalkingData Mobile User Demographics
86,315
<save_to_csv><EOS>
pred.to_csv('xgb_subm.csv', index=True)
TalkingData Mobile User Demographics
90,625
<filter><EOS>
%matplotlib inline
TalkingData Mobile User Demographics
90,625
<SOS> metric: MulticlassLoss Kaggle data source: talkingdata-mobile-user-demographics<import_modules>
TalkingData Mobile User Demographics
90,625
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression, Ridge
import datetime
import gc
from tqdm import tqdm<load_from_csv>
datadir = '../input'
gatrain = pd.read_csv(os.path.join(datadir, 'gender_age_train.csv'), index_col='device_id')
gatest = pd.read_csv(os.path.join(datadir, 'gender_age_test.csv'), index_col='device_id')
phone = pd.read_csv(os.path.join(datadir, 'phone_brand_device_model.csv'))
phone = phone.drop_duplicates('device_id', keep='first').set_index('device_id')
events = pd.read_csv(os.path.join(datadir, 'events.csv'), parse_dates=['timestamp'], index_col='event_id')
appevents = pd.read_csv(os.path.join(datadir, 'app_events.csv'),
                        usecols=['event_id', 'app_id', 'is_active'], dtype={'is_active': bool})
applabels = pd.read_csv(os.path.join(datadir, 'app_labels.csv'))
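One detail worth noting in the load:

# phone_brand_device_model.csv contains duplicated device_ids; keeping only the
# first occurrence makes device_id a unique index, which the index-aligned
# assignments gatrain['brand'] = phone['brand'] below rely on.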
TalkingData Mobile User Demographics
90,625
def get_cpmp_sub(save_oof=False, save_public_test=False): train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/train.csv') train['Province_State'].fillna('', inplace=True) train['Date'] = pd.to_datetime(train['Date']) train['day'] = train.Date.dt.dayofyear train['geo'] = ['_'.join(x)for x in zip(train['Country_Region'], train['Province_State'])] train test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/test.csv') test['Province_State'].fillna('', inplace=True) test['Date'] = pd.to_datetime(test['Date']) test['day'] = test.Date.dt.dayofyear test['geo'] = ['_'.join(x)for x in zip(test['Country_Region'], test['Province_State'])] test day_min = train['day'].min() train['day'] -= day_min test['day'] -= day_min min_test_val_day = test.day.min() max_test_val_day = train.day.max() max_test_day = test.day.max() num_days = max_test_day + 1 min_test_val_day, max_test_val_day, num_days train['ForecastId'] = -1 test['Id'] = -1 test['ConfirmedCases'] = 0 test['Fatalities'] = 0 debug = False data = pd.concat([train, test[test.day > max_test_val_day][train.columns] ] ).reset_index(drop=True) if debug: data = data[data['geo'] >= 'France_'].reset_index(drop=True) gc.collect() dates = data[data['geo'] == 'France_'].Date.values if 0: gr = data.groupby('geo') data['ConfirmedCases'] = gr.ConfirmedCases.transform('cummax') data['Fatalities'] = gr.Fatalities.transform('cummax') geo_data = data.pivot(index='geo', columns='day', values='ForecastId') num_geo = geo_data.shape[0] geo_data geo_id = {} for i,g in enumerate(geo_data.index): geo_id[g] = i ConfirmedCases = data.pivot(index='geo', columns='day', values='ConfirmedCases') Fatalities = data.pivot(index='geo', columns='day', values='Fatalities') if debug: cases = ConfirmedCases.values deaths = Fatalities.values else: cases = np.log1p(ConfirmedCases.values) deaths = np.log1p(Fatalities.values) def get_dataset(start_pred, num_train, lag_period): days = np.arange(start_pred - num_train + 1, start_pred + 1) lag_cases = np.vstack([cases[:, d - lag_period : d] for d in days]) lag_deaths = np.vstack([deaths[:, d - lag_period : d] for d in days]) target_cases = np.vstack([cases[:, d : d + 1] for d in days]) target_deaths = np.vstack([deaths[:, d : d + 1] for d in days]) geo_ids = np.vstack([geo_ids_base for d in days]) country_ids = np.vstack([country_ids_base for d in days]) return lag_cases, lag_deaths, target_cases, target_deaths, geo_ids, country_ids, days def update_valid_dataset(data, pred_death, pred_case): lag_cases, lag_deaths, target_cases, target_deaths, geo_ids, country_ids, days = data day = days[-1] + 1 new_lag_cases = np.hstack([lag_cases[:, 1:], pred_case]) new_lag_deaths = np.hstack([lag_deaths[:, 1:], pred_death]) new_target_cases = cases[:, day:day+1] new_target_deaths = deaths[:, day:day+1] new_geo_ids = geo_ids new_country_ids = country_ids new_days = 1 + days return new_lag_cases, new_lag_deaths, new_target_cases, new_target_deaths, new_geo_ids, new_country_ids, new_days def fit_eval(lr_death, lr_case, data, start_lag_death, end_lag_death, num_lag_case, fit, score): lag_cases, lag_deaths, target_cases, target_deaths, geo_ids, country_ids, days = data X_death = np.hstack([lag_cases[:, -start_lag_death:-end_lag_death], country_ids]) X_death = np.hstack([lag_deaths[:, -num_lag_case:], country_ids]) X_death = np.hstack([lag_cases[:, -start_lag_death:-end_lag_death], lag_deaths[:, -num_lag_case:], country_ids]) y_death = target_deaths y_death_prev = lag_deaths[:, -1:] if fit: if 0: keep =(y_death > 0 ).ravel() X_death = 
X_death[keep] y_death = y_death[keep] y_death_prev = y_death_prev[keep] lr_death.fit(X_death, y_death) y_pred_death = lr_death.predict(X_death) y_pred_death = np.maximum(y_pred_death, y_death_prev) X_case = np.hstack([lag_cases[:, -num_lag_case:], geo_ids]) X_case = lag_cases[:, -num_lag_case:] y_case = target_cases y_case_prev = lag_cases[:, -1:] if fit: lr_case.fit(X_case, y_case) y_pred_case = lr_case.predict(X_case) y_pred_case = np.maximum(y_pred_case, y_case_prev) if score: death_score = val_score(y_death, y_pred_death) case_score = val_score(y_case, y_pred_case) else: death_score = 0 case_score = 0 return death_score, case_score, y_pred_death, y_pred_case def train_model(train, valid, start_lag_death, end_lag_death, num_lag_case, num_val, score=True): alpha = 3 lr_death = Ridge(alpha=alpha, fit_intercept=False) lr_case = Ridge(alpha=alpha, fit_intercept=True) (train_death_score, train_case_score, train_pred_death, train_pred_case, )= fit_eval(lr_death, lr_case, train, start_lag_death, end_lag_death, num_lag_case, fit=True, score=score) death_scores = [] case_scores = [] death_pred = [] case_pred = [] for i in range(num_val): (valid_death_score, valid_case_score, valid_pred_death, valid_pred_case, )= fit_eval(lr_death, lr_case, valid, start_lag_death, end_lag_death, num_lag_case, fit=False, score=score) death_scores.append(valid_death_score) case_scores.append(valid_case_score) death_pred.append(valid_pred_death) case_pred.append(valid_pred_case) if 0: print('val death: %0.3f' % valid_death_score, 'val case: %0.3f' % valid_case_score, 'val : %0.3f' % np.mean([valid_death_score, valid_case_score]), flush=True) valid = update_valid_dataset(valid, valid_pred_death, valid_pred_case) if score: death_scores = np.sqrt(np.mean([s**2 for s in death_scores])) case_scores = np.sqrt(np.mean([s**2 for s in case_scores])) if 0: print('train death: %0.3f' % train_death_score, 'train case: %0.3f' % train_case_score, 'val death: %0.3f' % death_scores, 'val case: %0.3f' % case_scores, 'val : %0.3f' %(( death_scores + case_scores)/ 2), flush=True) else: print('%0.4f' % case_scores, ', %0.4f' % death_scores, '= %0.4f' %(( death_scores + case_scores)/ 2), flush=True) death_pred = np.hstack(death_pred) case_pred = np.hstack(case_pred) return death_scores, case_scores, death_pred, case_pred countries = [g.split('_')[0] for g in geo_data.index] countries = pd.factorize(countries)[0] country_ids_base = countries.reshape(( -1, 1)) ohe = OneHotEncoder(sparse=False) country_ids_base = 0.2 * ohe.fit_transform(country_ids_base) country_ids_base.shape geo_ids_base = np.arange(num_geo ).reshape(( -1, 1)) ohe = OneHotEncoder(sparse=False) geo_ids_base = 0.1 * ohe.fit_transform(geo_ids_base) geo_ids_base.shape def val_score(true, pred): pred = np.log1p(np.round(np.expm1(pred)- 0.2)) return np.sqrt(mean_squared_error(true.ravel() , pred.ravel())) def val_score(true, pred): return np.sqrt(mean_squared_error(true.ravel() , pred.ravel())) start_lag_death, end_lag_death = 14, 6, num_train = 5 num_lag_case = 14 lag_period = max(start_lag_death, num_lag_case) def get_oof(start_val_delta=0): start_val = min_test_val_day + start_val_delta last_train = start_val - 1 num_val = max_test_val_day - start_val + 1 print(dates[start_val], start_val, num_val) train_data = get_dataset(last_train, num_train, lag_period) valid_data = get_dataset(start_val, 1, lag_period) _, _, val_death_preds, val_case_preds = train_model(train_data, valid_data, start_lag_death, end_lag_death, num_lag_case, num_val) pred_deaths = Fatalities.iloc[:, 
start_val:start_val+num_val].copy() pred_deaths.iloc[:, :] = np.expm1(val_death_preds) pred_deaths = pred_deaths.stack().reset_index() pred_deaths.columns = ['geo', 'day', 'Fatalities'] pred_deaths pred_cases = ConfirmedCases.iloc[:, start_val:start_val+num_val].copy() pred_cases.iloc[:, :] = np.expm1(val_case_preds) pred_cases = pred_cases.stack().reset_index() pred_cases.columns = ['geo', 'day', 'ConfirmedCases'] pred_cases sub = train[['Date', 'Id', 'geo', 'day']] sub = sub.merge(pred_cases, how='left', on=['geo', 'day']) sub = sub.merge(pred_deaths, how='left', on=['geo', 'day']) sub = sub[sub.day >= start_val] sub = sub[['Id', 'ConfirmedCases', 'Fatalities']].copy() return sub if save_oof: for start_val_delta, date in zip(range(3, -8, -3), ['2020-03-22', '2020-03-19', '2020-03-16', '2020-03-13']): print(date, end=' ') oof = get_oof(start_val_delta) oof.to_csv('.. /submissions/cpmp-%s.csv' % date, index=None) def get_sub(start_val_delta=0): start_val = min_test_val_day + start_val_delta last_train = start_val - 1 num_val = max_test_val_day - start_val + 1 print(dates[last_train], start_val, num_val) train_data = get_dataset(last_train, num_train, lag_period) valid_data = get_dataset(start_val, 1, lag_period) _, _, val_death_preds, val_case_preds = train_model(train_data, valid_data, start_lag_death, end_lag_death, num_lag_case, num_val) pred_deaths = Fatalities.iloc[:, start_val:start_val+num_val].copy() pred_deaths.iloc[:, :] = np.expm1(val_death_preds) pred_deaths = pred_deaths.stack().reset_index() pred_deaths.columns = ['geo', 'day', 'Fatalities'] pred_deaths pred_cases = ConfirmedCases.iloc[:, start_val:start_val+num_val].copy() pred_cases.iloc[:, :] = np.expm1(val_case_preds) pred_cases = pred_cases.stack().reset_index() pred_cases.columns = ['geo', 'day', 'ConfirmedCases'] pred_cases sub = test[['Date', 'ForecastId', 'geo', 'day']] sub = sub.merge(pred_cases, how='left', on=['geo', 'day']) sub = sub.merge(pred_deaths, how='left', on=['geo', 'day']) sub = sub.fillna(0) sub = sub[['ForecastId', 'ConfirmedCases', 'Fatalities']] return sub return sub known_test = train[['geo', 'day', 'ConfirmedCases', 'Fatalities'] ].merge(test[['geo', 'day', 'ForecastId']], how='left', on=['geo', 'day']) known_test = known_test[['ForecastId', 'ConfirmedCases', 'Fatalities']][known_test.ForecastId.notnull() ].copy() known_test unknow_test = test[test.day > max_test_val_day] unknow_test def get_final_sub() : start_val = max_test_val_day + 1 last_train = start_val - 1 num_val = max_test_day - start_val + 1 print(dates[last_train], start_val, num_val) train_data = get_dataset(last_train, num_train, lag_period) valid_data = get_dataset(start_val, 1, lag_period) (_, _, val_death_preds, val_case_preds )= train_model(train_data, valid_data, start_lag_death, end_lag_death, num_lag_case, num_val, score=False) pred_deaths = Fatalities.iloc[:, start_val:start_val+num_val].copy() pred_deaths.iloc[:, :] = np.expm1(val_death_preds) pred_deaths = pred_deaths.stack().reset_index() pred_deaths.columns = ['geo', 'day', 'Fatalities'] pred_deaths pred_cases = ConfirmedCases.iloc[:, start_val:start_val+num_val].copy() pred_cases.iloc[:, :] = np.expm1(val_case_preds) pred_cases = pred_cases.stack().reset_index() pred_cases.columns = ['geo', 'day', 'ConfirmedCases'] pred_cases print(unknow_test.shape, pred_deaths.shape, pred_cases.shape) sub = unknow_test[['Date', 'ForecastId', 'geo', 'day']] sub = sub.merge(pred_cases, how='left', on=['geo', 'day']) sub = sub.merge(pred_deaths, how='left', on=['geo', 'day']) sub = 
sub[['ForecastId', 'ConfirmedCases', 'Fatalities']] sub = pd.concat([known_test, sub]) return sub if save_public_test: sub = get_sub() else: sub = get_final_sub() return sub<data_type_conversions>
TalkingData Mobile User Demographics
90,625
sub = get_cpmp_sub()
sub['ForecastId'] = sub['ForecastId'].astype('int')
sub<save_to_csv>
gatrain['trainrow'] = np.arange(gatrain.shape[0])
gatest['testrow'] = np.arange(gatest.shape[0])
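# trainrow/testrow give each device a stable 0-based row position, used as the
# row coordinate in every csr_matrix feature construction that follows.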
TalkingData Mobile User Demographics
90,625
sub.to_csv('submission.csv', index=False)<save_to_csv>
brandencoder = LabelEncoder().fit(phone.phone_brand)
phone['brand'] = brandencoder.transform(phone['phone_brand'])
gatrain['brand'] = phone['brand']
gatest['brand'] = phone['brand']
Xtr_brand = csr_matrix((np.ones(gatrain.shape[0]), (gatrain.trainrow, gatrain.brand)))
Xte_brand = csr_matrix((np.ones(gatest.shape[0]), (gatest.testrow, gatest.brand)))
print('Brand features: train shape {}, test shape {}'.format(Xtr_brand.shape, Xte_brand.shape))
TalkingData Mobile User Demographics
90,625