kernel_id   int64          24.2k   23.3M
prompt      stringlengths  8       1.85M
completion  stringlengths  1       182k
comp_name   stringlengths  5       57
9,283,112
submission = []
for index, rt in log_regions.iterrows():
    st = rt['Province_State']
    co = rt['Country_Region']
    popt = list(rt['ConfirmedCases'])
    popt_ = list(rt['Fatalities'])
    if popt_ == [0.0, 0.0, 69.0]:
        popt_ = np.multiply(fp, popt)
    print(co, st, popt, popt_)
    rtest = test[(test['Province_State'] == st) & (test['Country_Region'] == co)]
    for index, rt in rtest.iterrows():
        try:
            tdate = rt['Date']
            start = min(train_[(train_['Province_State'] == st) & (train_['Country_Region'] == co)]['Date'].values)
            ca = logistic([date_day_diff(tdate, start)], *popt)
            try:
                fa = logistic([date_day_diff(tdate, start)], *popt_)
            except:
                fa = f([date_day_diff(tdate, start)], *popt_)
            submission.append((rt['ForecastId'], int(ca[0]), int(fa[0])))
        except:
            tdate = rt['Date']
            start = min(train_[(train_['Province_State'] == st) & (train_['Country_Region'] == co)]['Date'].values)
            ca = f([date_day_diff(tdate, start)], *popt)
            fa = f([date_day_diff(tdate, start)], *popt_)
            submission.append((rt['ForecastId'], int(ca[0]), int(fa[0])))
print("All done!")<save_to_csv>
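The cell above leans on helpers (`logistic`, `f`, `date_day_diff`, `fp`) defined earlier in the kernel and not shown here. A rough, hypothetical sketch of what the curve helpers could look like, given that three fitted parameters are unpacked into each:

# Hypothetical reconstructions of the helpers the cell above assumes;
# the actual kernel defines its own versions in earlier cells.
import numpy as np
from datetime import datetime

def logistic(x, L, k, x0):
    # Standard 3-parameter logistic curve: L / (1 + exp(-k*(x - x0))).
    x = np.asarray(x, dtype=float)
    return L / (1.0 + np.exp(-k * (x - x0)))

def f(x, a, b, c):
    # Quadratic fallback used when the logistic fit fails.
    x = np.asarray(x, dtype=float)
    return a * x**2 + b * x + c

def date_day_diff(d1, d0, fmt='%Y-%m-%d'):
    # Days elapsed between two date strings.
    return (datetime.strptime(d1, fmt) - datetime.strptime(d0, fmt)).days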
submission = pd.DataFrame({'Col1': test.Col1, 'Col2': lr.predict(X_test)})
KNIT_HACKS
9,283,112
<import_modules><EOS>
submission.to_csv('submission_reduction.csv', index=False)
KNIT_HACKS
8,789,450
<SOS> metric: rmse Kaggle data source: data-series-summarization-project-(v3)<load_from_csv>
inputFolderPath = '/kaggle/input/data-series-summarization-project-v3/'
outputFolderPath = '/kaggle/working/'
filename = 'synthetic_size50k_len256_znorm.bin'
inputFilePath = inputFolderPath + filename
Data Series Summarization Project (v3)
8,789,450
df=pd.read_csv("/kaggle/input/covid19-global-forecasting-week-2/train.csv", usecols=['Province_State','Country_Region','Date','ConfirmedCases','Fatalities']) <drop_column>
def sum32(inputFilePath):
    # 32-byte summary: keep only the first value of each 256-point series.
    summary_filepath = outputFolderPath + filename + '_sum32'
    time_series50k = np.fromfile(inputFilePath, dtype=np.float32).reshape(-1, 256)
    summary50k = []
    for time_series in time_series50k:
        summary50k.append(time_series[0])
    summary50knp = np.array(summary50k, dtype=np.float32)
    summary50knp.tofile(summary_filepath)
    return summary_filepath

def rec32(summary_filepath):
    # Reconstruct each series as 256 copies of its stored value.
    reconstructed_filepath = summary_filepath + '_rec32'
    summary50k = np.fromfile(summary_filepath, dtype=np.float32)
    reconstructed50k = []
    for summary in summary50k:
        reconstructed50k.append([summary] * 256)
    reconstructed50knp = np.array(reconstructed50k, dtype=np.float32)
    reconstructed50knp.tofile(reconstructed_filepath)
    return reconstructed_filepath

def sum64(inputFilePath):
    # 64-byte summary: two stored values per series (both the first point here).
    summary_filepath = outputFolderPath + filename + '_sum64'
    time_series50k = np.fromfile(inputFilePath, dtype=np.float32).reshape(-1, 256)
    summary50k = []
    for time_series in time_series50k:
        summary50k.append(time_series[0])
        summary50k.append(time_series[0])
    summary50knp = np.array(summary50k, dtype=np.float32)
    summary50knp.tofile(summary_filepath)
    return summary_filepath

def rec64(summary_filepath):
    reconstructed_filepath = summary_filepath + '_rec64'
    summary50k = np.fromfile(summary_filepath, dtype=np.float32).reshape(-1, 2)
    reconstructed50k = []
    for summary in summary50k:
        reconstructed50k.append([summary[0]] * 256)
    reconstructed50knp = np.array(reconstructed50k, dtype=np.float32)
    reconstructed50knp.tofile(reconstructed_filepath)
    return reconstructed_filepath

def sum128(inputFilePath):
    # 128-byte summary: four stored values per series.
    summary_filepath = outputFolderPath + filename + '_sum128'
    time_series50k = np.fromfile(inputFilePath, dtype=np.float32).reshape(-1, 256)
    summary50k = []
    for time_series in time_series50k:
        summary50k.append(time_series[0])
        summary50k.append(time_series[0])
        summary50k.append(time_series[0])
        summary50k.append(time_series[0])
    summary50knp = np.array(summary50k, dtype=np.float32)
    summary50knp.tofile(summary_filepath)
    return summary_filepath

def rec128(summary_filepath):
    reconstructed_filepath = summary_filepath + '_rec128'
    summary50k = np.fromfile(summary_filepath, dtype=np.float32).reshape(-1, 4)
    reconstructed50k = []
    for summary in summary50k:
        reconstructed50k.append([summary[0]] * 256)
    reconstructed50knp = np.array(reconstructed50k, dtype=np.float32)
    reconstructed50knp.tofile(reconstructed_filepath)
    return reconstructed_filepath
Data Series Summarization Project (v3)
8,789,450
<load_from_csv><EOS>
s32 = sum32(inputFilePath)
r32 = rec32(s32)
pred32 = np.fromfile(r32, dtype=np.float32)
s64 = sum64(inputFilePath)
r64 = rec64(s64)
pred64 = np.fromfile(r64, dtype=np.float32)
s128 = sum128(inputFilePath)
r128 = rec128(s128)
pred128 = np.fromfile(r128, dtype=np.float32)

output = []
globalCsvIndex = 0
for i in range(len(pred32)):
    output.append([globalCsvIndex, pred32[i]])
    globalCsvIndex = globalCsvIndex + 1
for i in range(len(pred64)):
    output.append([globalCsvIndex, pred64[i]])
    globalCsvIndex = globalCsvIndex + 1
for i in range(len(pred128)):
    output.append([globalCsvIndex, pred128[i]])
    globalCsvIndex = globalCsvIndex + 1

with open('submission.csv', 'w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(['id', 'expected'])
    writer.writerows(output)
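Since the competition metric is RMSE, a quick sanity check (not part of the original kernel) would compare each reconstruction against the raw series, reusing the names from the cell above:

# Hypothetical sanity check: RMSE of each reconstruction vs. the raw data.
import numpy as np

original = np.fromfile(inputFilePath, dtype=np.float32)
for name, pred in [('32B', pred32), ('64B', pred64), ('128B', pred128)]:
    rmse = np.sqrt(np.mean((original - pred) ** 2))
    print(f'{name} summary -> reconstruction RMSE: {rmse:.4f}')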
Data Series Summarization Project (v3)
6,926,526
<SOS> metric: rmse Kaggle data source: dl-for-exploration-geophysics<choose_model_class>
import numpy as np
import pandas as pd
DL for exploration geophysics
6,926,526
submit_confirmed = []
submit_fatal = []
for i in df1:
    data = i.ConfirmedCases.astype('int32').tolist()
    try:
        model = SARIMAX(data, order=(1, 1, 0), seasonal_order=(1, 1, 0, 12), measurement_error=True)
        model_fit = model.fit(disp=False)
        predicted = model_fit.predict(len(data), len(data) + 34)
        new = np.concatenate((np.array(data), np.array([int(num) for num in predicted])), axis=0)
        submit_confirmed.extend(list(new[-43:]))
    except:
        submit_confirmed.extend(list(data[-10:-1]))
        for j in range(34):
            submit_confirmed.append(data[-1] * 2)
    data = i.Fatalities.astype('int32').tolist()
    try:
        model = SARIMAX(data, order=(1, 1, 0), seasonal_order=(1, 1, 0, 12), measurement_error=True)
        model_fit = model.fit(disp=False)
        predicted = model_fit.predict(len(data), len(data) + 34)
        new = np.concatenate((np.array(data), np.array([int(num) for num in predicted])), axis=0)
        submit_fatal.extend(list(new[-43:]))
    except:
        submit_fatal.extend(list(data[-10:-1]))
        for j in range(34):
            submit_fatal.append(data[-1] * 2)<data_type_conversions>
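The loop iterates `df1`, which must yield one frame per region with `ConfirmedCases` and `Fatalities` columns; the kernel builds it elsewhere. A hypothetical construction from the training frame `df` loaded above could be:

# Hypothetical construction of df1 as a list of per-region frames.
from statsmodels.tsa.statespace.sarimax import SARIMAX  # import the loop relies on

df1 = [group.sort_values('Date')
       for _, group in df.groupby(['Country_Region', 'Province_State'], dropna=False)]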
test = pd.read_csv("/kaggle/input/mldl-competition-1/test.csv") train = pd.read_csv("/kaggle/input/mldl-competition-1/train.csv") sampleSabmission = pd.read_csv("/kaggle/input/mldl-competition-1/sampleSubmission.csv" )
DL for exploration geophysics
6,926,526
df_submit = pd.concat([pd.Series(np.arange(1, 1 + len(submit_confirmed))),
                       pd.Series(submit_confirmed),
                       pd.Series(submit_fatal)], axis=1)
df_submit = df_submit.fillna(method='pad').astype(int)
df_submit.head()<merge>
print(tf.__version__)
DL for exploration geophysics
6,926,526
df_submit.rename(columns={0: 'ForecastId', 1: 'ConfirmedCases', 2: 'Fatalities'}, inplace=True)
complete_test = pd.merge(test, df_submit, how="left", on="ForecastId")
complete_test.head()<save_to_csv>
column_names = ['Id', 'X', 'Y', 'Z', 'Time']
X_train_orig = train[["X", "Y", "Z"]]
X_test_orig = test[["X", "Y", "Z"]]
Y_train_orig = train["Time"]
print(X_train_orig.shape)
print(X_test_orig.shape)
print(Y_train_orig.shape)
DL for exploration geophysics
6,926,526
df_submit.to_csv('submission.csv', header=['ForecastId', 'ConfirmedCases', 'Fatalities'], index=False)
complete_test.to_csv('complete_test.csv', index=False)<set_options>
scaler = MinMaxScaler(feature_range=(-1, 1), copy=True)
scaler.fit(X_train_orig)
print("Maximum values of X_train(X, Y, Z): " + str(scaler.data_max_))
print("Minimum values of X_train(X, Y, Z): " + str(scaler.data_min_))
X_train_norm = scaler.transform(X_train_orig)
X_test_norm = scaler.transform(X_test_orig)
DL for exploration geophysics
6,926,526
pd.set_option("display.max_columns", 200)
pd.set_option("display.max_rows", 200)<load_from_csv>
X_train, X_val, y_train, y_val = train_test_split(X_train_norm, Y_train_orig, test_size=0.2)
print(X_train.shape)
print(y_train.shape)
print(X_val.shape)
print(y_val.shape)
DL for exploration geophysics
6,926,526
country_info = pd.read_csv("/kaggle/input/countryinfo/covid19countryinfo.csv")
country_info = country_info.rename({"region": "state"}, axis=1)
country_info.loc[country_info["state"].isna(), "state"] = "Unknown"
country_info = country_info.drop([col for col in country_info.columns if "Unnamed" in col], axis=1)
country_info["pop"] = country_info["pop"].str.replace(',', '').astype(float)

pollution = pd.read_csv("/kaggle/input/pollution-by-country-for-covid19-analysis/region_pollution.csv")
pollution = pollution.rename({"Region": "country",
                              "Outdoor Pollution(deaths per 100000)": "outdoor_pol",
                              "Indoor Pollution(deaths per 100000)": "indoor_pol"}, axis=1)

economy = pd.read_csv("/kaggle/input/the-economic-freedom-index/economic_freedom_index2019_data.csv", engine='python')
economy_cols = [col for col in economy.columns if economy[col].dtype == "float64"] + ["Country"]
economy = economy[economy_cols]
economy = economy.rename({"Country": "country"}, axis=1)

def append_external_data(df):
    df = pd.merge(df, country_info, on=["country", "state"], how="left")
    df = pd.merge(df, pollution, on="country", how="left")
    df = pd.merge(df, economy, on="country", how="left")
    return df<feature_engineering>
model = keras.Sequential([
    keras.layers.Dense(128, input_dim=3, activation='relu'),
    keras.layers.Dense(6, activation='relu'),
    keras.layers.Dense(1, activation="linear")
])
print(model.summary())
DL for exploration geophysics
6,926,526
country_info["publicplace"] = np.where(country_info["publicplace"].str.contains("/"), country_info["publicplace"], np.nan )<count_values>
model.compile(optimizer='adam', loss='MSE', metrics=['accuracy'])
DL for exploration geophysics
6,926,526
country_info["publicplace"].value_counts()<define_variables>
history = model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=5)
DL for exploration geophysics
6,926,526
list_rel_columns = ['state', 'country', 'pop', 'tests', 'testpop', 'density', 'medianage',
                    'urbanpop', 'quarantine', 'schools', 'publicplace', 'gatheringlimit',
                    'gathering', 'nonessential', 'hospibed', 'smokers', 'sex0', 'sex14',
                    'sex25', 'sex54', 'sex64', 'sex65plus', 'sexratio', 'lung', 'femalelung',
                    'malelung', 'gdp2019', 'healthexp', 'healthperpop', 'fertility']<drop_column>
test_loss, test_acc = model.evaluate(X_val, y_val, verbose=0)
print('Test loss:', test_loss)
print('Test accuracy:', test_acc)
DL for exploration geophysics
6,926,526
country_info = country_info[list_rel_columns]<filter>
predictions = model.predict(X_test_norm)
DL for exploration geophysics
6,926,526
<merge><EOS>
mySubmission = sampleSubmission
mySubmission["Predicted"] = predictions
mySubmission.head()
filename = 'IvanPredictions_1.csv'
mySubmission.to_csv(filename, index=False)
print('Saved file: ' + filename)
DL for exploration geophysics
246,962
%%time
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/train.csv')<drop_column>
%matplotlib inline
March Machine Learning Mania 2016
246,962
junk = ['Id', 'Date', 'Province_State']
train.drop(junk, axis=1, inplace=True)<feature_engineering>
dr = pd.read_csv(".. /input/RegularSeasonDetailedResults.csv")
March Machine Learning Mania 2016
246,962
# 'c' is assumed to come from an enclosing per-country loop earlier in the kernel
train['Country_Region'] = c<data_type_conversions>
simple_df_1 = pd.DataFrame()
simple_df_1[["team1", "team2"]] = dr[["Wteam", "Lteam"]].copy()
simple_df_1["pred"] = 1
simple_df_2 = pd.DataFrame()
simple_df_2[["team1", "team2"]] = dr[["Lteam", "Wteam"]]
simple_df_2["pred"] = 0
simple_df = pd.concat((simple_df_1, simple_df_2), axis=0)
simple_df.head()
March Machine Learning Mania 2016
246,962
train['ConfirmedCases'] = train['ConfirmedCases'].astype(int)
train['Fatalities'] = train['Fatalities'].astype(int)<drop_column>
n = simple_df.team1.nunique()
n
March Machine Learning Mania 2016
246,962
def prep_data(train):
    X_train = train[train.ConfirmedCases > 0]
    X_train.reset_index(inplace=True, drop=True)
    train.reset_index(inplace=True, drop=True)
    return (X_train, train)<concatenate>
trans_dict = {t: i for i, t in enumerate(simple_df.team1.unique())}
simple_df["team1"] = simple_df["team1"].apply(lambda x: trans_dict[x])
simple_df["team2"] = simple_df["team2"].apply(lambda x: trans_dict[x])
simple_df.head()
March Machine Learning Mania 2016
246,962
def Calculate_Table(X_train):
    # First differences, second differences, and fatality/confirmed ratios.
    diff_conf, conf_old = [], 0
    diff_fat, fat_old = [], 0
    dd_conf, dc_old = [], 0
    dd_fat, df_old = [], 0
    ratios = []
    for row in X_train.values:
        diff_conf.append(row[1] - conf_old)
        conf_old = row[1]
        diff_fat.append(row[2] - fat_old)
        fat_old = row[2]
        dd_conf.append(diff_conf[-1] - dc_old)
        dc_old = diff_conf[-1]
        dd_fat.append(diff_fat[-1] - df_old)
        df_old = diff_fat[-1]
        ratios.append(fat_old / conf_old)
    ratio = fat_old / conf_old
    return diff_conf, conf_old, diff_fat, fat_old, dd_conf, dc_old, dd_fat, df_old, ratios, ratio<feature_engineering>
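To make the bookkeeping concrete, a tiny hand-checkable run of the same first/second-difference logic (values invented for illustration):

# Illustration only: differences of a made-up cumulative series.
cases = [1, 3, 6, 10]
diff = [cases[0]] + [cases[i] - cases[i - 1] for i in range(1, len(cases))]  # [1, 2, 3, 4]
dd = [diff[0]] + [diff[i] - diff[i - 1] for i in range(1, len(diff))]        # [1, 1, 1, 1]
print(diff, dd)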
train = simple_df.values
np.random.shuffle(train)
March Machine Learning Mania 2016
246,962
def populate_df_features(X_train, diff_conf, diff_fat, dd_conf, dd_fat, ratios):
    pd.options.mode.chained_assignment = None
    X_train['diff_confirmed'] = diff_conf
    X_train['diff_fatalities'] = diff_fat
    X_train['dd_confirmed'] = dd_conf
    X_train['dd_fatalities'] = dd_fat
    X_train['ratios'] = ratios
    return X_train<categorify>
def embedding_input(name, n_in, n_out, reg):
    inp = Input(shape=(1,), dtype="int64", name=name)
    return inp, Embedding(n_in, n_out, input_length=1, W_regularizer=l2(reg))(inp)

def create_bias(inp, n_in):
    x = Embedding(n_in, 1, input_length=1)(inp)
    return Flatten()(x)
March Machine Learning Mania 2016
246,962
def fill_nan(variable):
    if math.isnan(variable):
        return 0
    else:
        return variable<statistical_test>
n_factors = 50
team1_in, t1 = embedding_input("team1_in", n, n_factors, 1e-4)
team2_in, t2 = embedding_input("team2_in", n, n_factors, 1e-4)
b1 = create_bias(team1_in, n)
b2 = create_bias(team2_in, n)
March Machine Learning Mania 2016
246,962
def Cal_Series_Avg(X_train, ratio):
    d_c = fill_nan(X_train.diff_confirmed[X_train.diff_confirmed != 0].mean())
    dd_c = fill_nan(X_train.dd_confirmed[X_train.dd_confirmed != 0].mean())
    d_f = fill_nan(X_train.diff_fatalities[X_train.diff_fatalities != 0].mean())
    dd_f = fill_nan(X_train.dd_fatalities[X_train.dd_fatalities != 0].mean())
    rate = fill_nan(X_train.ratios[X_train.ratios != 0].mean())
    rate = max(rate, ratio)
    return d_c, dd_c, d_f, dd_f, rate<data_type_conversions>
x = merge([t1, t2], mode="dot")
x = Flatten()(x)
x = merge([x, b1], mode="sum")
x = merge([x, b2], mode="sum")
x = Dense(1, activation="sigmoid")(x)
model = Model([team1_in, team2_in], x)
model.compile(Adam(0.001), loss="binary_crossentropy")
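This cell uses the Keras 1 `merge(..., mode=...)` function, which modern Keras has dropped. A rough equivalent of the same dot-product matrix-factorization head under current tf.keras (a sketch, not the kernel's code; `t1`, `t2`, `b1`, `b2`, `team1_in`, `team2_in` assumed built as above) could be:

# Sketch of the same model head with modern tf.keras layers.
from tensorflow.keras.layers import Dot, Add, Flatten, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

x = Dot(axes=2)([t1, t2])   # replaces merge(..., mode="dot")
x = Flatten()(x)
x = Add()([x, b1])          # replaces merge(..., mode="sum")
x = Add()([x, b2])
x = Dense(1, activation="sigmoid")(x)
model = Model([team1_in, team2_in], x)
model.compile(Adam(0.001), loss="binary_crossentropy")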
March Machine Learning Mania 2016
246,962
def apply_taylor(train, d_c, dd_c, d_f, dd_f, rate):
    pred_c, pred_f = list(train.ConfirmedCases.loc[57:69].astype(int)), list(train.Fatalities.loc[57:69].astype(int))
    for i in range(1, 31):
        pred_c.append(int((train.ConfirmedCases[69] + d_c * i + 0.5 * dd_c * (i ** 2))))
        pred_f.append(pred_c[-1] * rate)
    return pred_c, pred_f<data_type_conversions>
history = model.fit([train[:, 0], train[:, 1]], train[:, 2], batch_size=64, nb_epoch=10, verbose=2)
March Machine Learning Mania 2016
246,962
def apply_taylor2(train, d_c, dd_c, d_f, dd_f, rate):
    pred_c, pred_f = list(train.ConfirmedCases.loc[57:58].astype(int)), list(train.Fatalities.loc[57:58].astype(int))
    for i in range(1, 42):
        pred_c.append(int((train.ConfirmedCases[58] + d_c * i + 0.5 * dd_c * (i ** 2))))
        pred_f.append(pred_c[-1] * rate)
    return pred_c, pred_f<groupby>
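Both helpers extrapolate with a second-order Taylor step, treating the averaged first difference as a velocity and the averaged second difference as an acceleration: roughly cases(t0 + i) ≈ cases(t0) + d_c·i + ½·dd_c·i². A tiny self-contained check with invented numbers:

# Illustration of the Taylor extrapolation above: last count 100,
# average daily growth 10, average growth change 2.
last, d_c, dd_c = 100, 10.0, 2.0
preds = [int(last + d_c * i + 0.5 * dd_c * i**2) for i in range(1, 6)]
print(preds)  # [111, 124, 139, 156, 175]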
sub = pd.read_csv(".. /input/SampleSubmission.csv") sub["team1"] = sub["Id"].apply(lambda x: trans_dict[int(x.split("_")[1])]) sub["team2"] = sub["Id"].apply(lambda x: trans_dict[int(x.split("_")[2])]) sub.head()
March Machine Learning Mania 2016
246,962
pc = []
pf = []
pc2 = []
pf2 = []
pred_c = []
pred_f = []
pred_c2 = []
pred_f2 = []
for i, country in enumerate(country_list):
    country_data = train[train['Country_Region'] == country]
    X_train, country_data = prep_data(country_data)
    if (len(X_train) > 0):
        diff_conf, conf_old, diff_fat, fat_old, dd_conf, dc_old, dd_fat, df_old, ratios, ratio = Calculate_Table(X_train)
        X_train = populate_df_features(X_train, diff_conf, diff_fat, dd_conf, dd_fat, ratios)
        d_c, dd_c, d_f, dd_f, rate = Cal_Series_Avg(X_train, ratio)
        pred_c, pred_f = apply_taylor(country_data, d_c, dd_c, d_f, dd_f, rate)
        pred_c2, pred_f2 = apply_taylor2(country_data, d_c, dd_c, d_f, dd_f, rate)
    else:
        pred_c = list(np.zeros(43))
        pred_f = list(np.zeros(43))
        pred_c2 = list(np.zeros(43))
        pred_f2 = list(np.zeros(43))
    pc += pred_c
    pf += pred_f
    pc2 += pred_c2
    pf2 += pred_f2<categorify>
sub["pred"] = model.predict([sub.team1, sub.team2]) sub = sub[["Id", "pred"]] sub.head()
March Machine Learning Mania 2016
246,962
pc = list(map(int, pc))
pf = list(map(int, pf))
pc2 = list(map(int, pc2))
pf2 = list(map(int, pf2))<import_modules>
sub.to_csv("CF.csv", index=False)
March Machine Learning Mania 2016
5,262,083
import matplotlib.pyplot as plt<save_to_csv>
%load_ext Cython
Conway's Reverse Game of Life
5,262,083
my_submission = pd.DataFrame({'ForecastId': list(range(1, len(pc2) + 1)),
                              'ConfirmedCases': pc2,
                              'Fatalities': pf2})
print(my_submission)
my_submission.to_csv('submission.csv', index=False)<set_options>
%%cython
import numpy as np
cimport numpy as np
cimport cython

@cython.cdivision(True)
@cython.boundscheck(False)
@cython.nonecheck(False)
@cython.wraparound(False)
cdef int calc_neighs(unsigned char[:, :] field, int i, int j, int n, int k):
    # Count live neighbors of cell (i, j) on an n x k board without wrap-around.
    cdef:
        int neighs = 0;
        int i_min = i - 1;
        int i_pl = i + 1;
        int j_min = j - 1;
        int j_pl = j + 1;
    neighs = 0
    if i_min >= 0:
        if j_min >= 0:
            neighs += field[i_min, j_min]
        neighs += field[i_min, j]
        if j_pl < k:
            neighs += field[i_min, j_pl]
    if j_min >= 0:
        neighs += field[i, j_min]
    if j_pl < k:
        neighs += field[i, j_pl]
    if i_pl < n:
        if j_min >= 0:
            neighs += field[i_pl, j_min]
        neighs += field[i_pl, j]
        if j_pl < k:
            neighs += field[i_pl, j_pl]
    return neighs

@cython.cdivision(True)
@cython.boundscheck(False)
@cython.nonecheck(False)
@cython.wraparound(False)
cpdef make_move(unsigned char[:, :] field, int moves):
    # Advance a Game of Life board by `moves` steps, ping-ponging two buffers.
    cdef:
        int _, i, j, neighs;
        int n, k;
        int switch = 0;
        unsigned char[:, :] cur_field;
        unsigned char[:, :] next_field;
    cur_field = np.copy(field)
    next_field = np.zeros_like(field, 'uint8')
    n = field.shape[0]
    k = field.shape[1]
    for _ in range(moves):
        if switch == 0:
            for i in range(n):
                for j in range(k):
                    neighs = calc_neighs(cur_field, i, j, n, k)
                    if cur_field[i, j] and neighs == 2:
                        next_field[i, j] = 1
                    elif neighs == 3:
                        next_field[i, j] = 1
                    else:
                        next_field[i, j] = 0
        else:
            for i in range(n):
                for j in range(k):
                    neighs = calc_neighs(next_field, i, j, n, k)
                    if next_field[i, j] and neighs == 2:
                        cur_field[i, j] = 1
                    elif neighs == 3:
                        cur_field[i, j] = 1
                    else:
                        cur_field[i, j] = 0
        switch = (switch + 1) % 2
    return np.array(next_field if switch else cur_field)
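A quick smoke test of the compiled `make_move` (hypothetical, not in the original kernel; the cell magic exposes the function to the notebook namespace):

# Hypothetical smoke test for the Cython cell above.
import numpy as np

board = np.random.binomial(1, 0.5, (20, 20)).astype('uint8')
after_one = make_move(board, 1)   # one Game of Life step
after_five = make_move(board, 5)  # five steps
print(board.sum(), after_one.sum(), after_five.sum())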
Conway's Reverse Game of Life
5,262,083
%matplotlib inline <define_variables>
NROW, NCOL = 20, 20

def generate_samples(delta=1, n=32):
    batch = np.split(np.random.binomial(1, 0.5, (NROW * n, NCOL)).astype('uint8'), n)
    Yy = [life.make_move(state, 5) for state in batch]
    Xx = [life.make_move(state, 1) for state in Yy]
    Y = np.array([y.ravel() for y in Yy])
    X = np.array([x.ravel() for x in Xx])
    return X, Y

def data_generator(delta=1, batch_size=32, ravel=True):
    while True:
        batch = np.split(np.random.binomial(1, 0.5, (NROW * batch_size, NCOL)).astype('uint8'), batch_size)
        Yy = [make_move(state, 5) for state in batch]
        Xx = [make_move(state, delta) for state in Yy]
        if ravel:
            Y = np.array([y.ravel() for y in Yy])
            X = np.array([x.ravel() for x in Xx])
            yield X, Y
        else:
            yield np.array(Xx)[:, :, :, np.newaxis], np.array(Yy)[:, :, :, np.newaxis]
Conway's Reverse Game of Life
5,262,083
base_dir = '../input/dogs-vs-cats-redux-kernels-edition'
train_dir = '../data/train'
test_dir = '../data/test'<load_pretrained>
def create_model(n_hidden_convs=2, n_hidden_filters=128, kernel_size=5):
    nn = Sequential()
    nn.add(Conv2D(n_hidden_filters, kernel_size, padding='same', activation='relu', input_shape=(20, 20, 1)))
    nn.add(BatchNormalization())
    for i in range(n_hidden_convs):
        nn.add(Conv2D(n_hidden_filters, kernel_size, padding='same', activation='relu'))
        nn.add(BatchNormalization())
    nn.add(Conv2D(1, kernel_size, padding='same', activation='sigmoid'))
    nn.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return nn

def model_v2():
    # 'lrelu' is assumed to be a leaky-ReLU callable defined earlier in the kernel.
    nn = Sequential()
    nn.add(Conv2D(128, 5, padding='same', activation=lrelu, input_shape=(20, 20, 1)))
    nn.add(BatchNormalization())
    nn.add(Conv2D(128, 5, padding='valid', activation=lrelu))
    nn.add(BatchNormalization())
    nn.add(MaxPool2D())
    nn.add(Conv2DTranspose(128, 2, strides=(2, 2), padding='valid', activation=lrelu))
    nn.add(BatchNormalization())
    nn.add(Conv2DTranspose(128, 5, strides=(1, 1), padding='valid', activation=lrelu))
    nn.add(BatchNormalization())
    nn.add(Conv2D(1, 5, padding='same', activation='sigmoid'))
    nn.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return nn
Conway's Reverse Game of Life
5,262,083
with zipfile.ZipFile(os.path.join(base_dir, 'train.zip')) as train_zip:
    train_zip.extractall('../data')
with zipfile.ZipFile(os.path.join(base_dir, 'test.zip')) as test_zip:
    test_zip.extractall('../data')<define_variables>
models = []
for delta in range(1, 6):
    model = create_model(n_hidden_convs=6, n_hidden_filters=256)
    es = EarlyStopping(monitor='loss', patience=9, min_delta=0.001)
    model.fit_generator(data_generator(delta=delta, ravel=False),
                        steps_per_epoch=500, epochs=50, verbose=1, callbacks=[es])
    models.append(model)
Conway's Reverse Game of Life
5,262,083
train_list = glob.glob(os.path.join(train_dir, '*.jpg'))
test_list = glob.glob(os.path.join(test_dir, '*.jpg'))<split>
train_df = pd.read_csv('../input/train.csv', index_col=0)
test_df = pd.read_csv('../input/test.csv', index_col=0)
Conway's Reverse Game of Life
5,262,083
train_list, val_list = train_test_split(train_list, test_size=0.1)<normalization>
submit_df = pd.DataFrame(index=test_df.index, columns=['start.' + str(_) for _ in range(1, 401)])
Conway's Reverse Game of Life
5,262,083
class ImageTransform():
    def __init__(self, resize, mean, std):
        self.data_transform = {
            'train': transforms.Compose([
                transforms.RandomResizedCrop(resize, scale=(0.5, 1.0)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(mean, std)
            ]),
            'val': transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(resize),
                transforms.ToTensor(),
                transforms.Normalize(mean, std)
            ])
        }

    def __call__(self, img, phase):
        return self.data_transform[phase](img)<categorify>
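A brief, hypothetical usage sketch of the wrapper above; the kernel sets its own `size`, `mean`, and `std` elsewhere, so the values and image path here are illustrative only:

# Hypothetical usage of ImageTransform; values and path are placeholders.
from PIL import Image

size = 224
mean = (0.485, 0.456, 0.406)  # typical ImageNet normalization
std = (0.229, 0.224, 0.225)

transform = ImageTransform(size, mean, std)
img = Image.open('some_image.jpg')        # placeholder path
x_train = transform(img, phase='train')   # random crop + flip + normalize
x_val = transform(img, phase='val')       # deterministic resize + center crop
print(x_train.shape, x_val.shape)         # torch.Size([3, 224, 224]) each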
for delta in range(1, 6):
    mod = models[delta - 1]
    delta_df = test_df[test_df.delta == delta].iloc[:, 1:].values.reshape(-1, 20, 20, 1)
    submit_df[test_df.delta == delta] = mod.predict(delta_df).reshape(-1, 400).round(0).astype('uint8')
Conway's Reverse Game of Life
5,262,083
<set_options><EOS>
submit_df.to_csv('cnns_40.csv' )
Conway's Reverse Game of Life
13,799,686
<SOS> metric: CategorizationAccuracy Kaggle data source: cifar-10<create_dataframe>
print("Tensorflow version " + tf.__version__) AUTO = tf.data.experimental.AUTOTUNE
CIFAR-10 - Object Recognition in Images
13,799,686
train_dataset = DogvsCatDataset(train_list, transform=ImageTransform(size, mean, std), phase='train')
val_dataset = DogvsCatDataset(val_list, transform=ImageTransform(size, mean, std), phase='val')<load_pretrained>
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
    strategy = tf.distribute.MirroredStrategy()
print("Number of accelerators: ", strategy.num_replicas_in_sync)
CIFAR-10 - Object Recognition in Images
13,799,686
train_dataloader = data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_dataloader = data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
dataloader_dict = {'train': train_dataloader, 'val': val_dataloader}

print('Operation Check')
batch_iterator = iter(train_dataloader)
inputs, label = next(batch_iterator)
print(inputs.size())
print(label)<choose_model_class>
GCS_DS_PATH = KaggleDatasets().get_gcs_path("cifar-10-unzipped")
CIFAR-10 - Object Recognition in Images
13,799,686
use_pretrained = True
net = models.resnet50(pretrained=use_pretrained)
print(net)<train_model>
IMAGE_SIZE = [32, 32]
EPOCHS = 100
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
ROOT_PATH_MAP = {32: GCS_DS_PATH + "/tfrecords-jpeg-32x32/"}
ROOT_PATH = ROOT_PATH_MAP[IMAGE_SIZE[0]]
TRAINING_FILENAMES = tf.io.gfile.glob(ROOT_PATH + "train/*.tfrec")
VALIDATION_FILENAMES = tf.io.gfile.glob(ROOT_PATH + "validation/*.tfrec")
TEST_FILENAMES = tf.io.gfile.glob(ROOT_PATH + "test/*.tfrec")
CLASSES = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
CIFAR-10 - Object Recognition in Images
13,799,686
net.fc = nn.Linear(in_features=2048, out_features=2)
print('Done')<find_best_params>
np.set_printoptions(threshold=15, linewidth=80)

def batch_to_numpy_images_and_labels(data):
    images, labels = data
    numpy_images = images.numpy()
    numpy_labels = labels.numpy()
    if numpy_labels.dtype == object:  # unlabeled test batches carry ID strings
        numpy_labels = [None for _ in enumerate(numpy_images)]
    return numpy_images, numpy_labels

def title_from_label_and_target(label, correct_label):
    if correct_label is None:
        return CLASSES[label], True
    correct = (label == correct_label)
    return "{} [{}{}{}]".format(CLASSES[label], 'OK' if correct else 'NO',
                                u"\u2192" if not correct else '',
                                CLASSES[correct_label] if not correct else ''), correct

def display_one_flower(image, title, subplot, red=False, titlesize=16):
    plt.subplot(*subplot)
    plt.axis('off')
    plt.imshow(image)
    if len(title) > 0:
        plt.title(title, fontsize=int(titlesize) if not red else int(titlesize / 1.2),
                  color='red' if red else 'black',
                  fontdict={'verticalalignment': 'center'}, pad=int(titlesize / 1.5))
    return (subplot[0], subplot[1], subplot[2] + 1)

def display_batch_of_images(databatch, predictions=None):
    images, labels = batch_to_numpy_images_and_labels(databatch)
    if labels is None:
        labels = [None for _ in enumerate(images)]
    rows = int(math.sqrt(len(images)))
    cols = len(images) // rows
    FIGSIZE = 13.0
    SPACING = 0.1
    subplot = (rows, cols, 1)
    if rows < cols:
        plt.figure(figsize=(FIGSIZE, FIGSIZE / cols * rows))
    else:
        plt.figure(figsize=(FIGSIZE / rows * cols, FIGSIZE))
    for i, (image, label) in enumerate(zip(images[:rows * cols], labels[:rows * cols])):
        title = '' if label is None else CLASSES[label]
        correct = True
        if predictions is not None:
            title, correct = title_from_label_and_target(predictions[i], label)
        dynamic_titlesize = FIGSIZE * SPACING / max(rows, cols) * 40 + 3
        subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize)
    plt.tight_layout()
    if label is None and predictions is None:
        plt.subplots_adjust(wspace=0, hspace=0)
    else:
        plt.subplots_adjust(wspace=SPACING, hspace=SPACING)
    plt.show()

def display_confusion_matrix(cmat, score, precision, recall):
    plt.figure(figsize=(15, 15))
    ax = plt.gca()
    ax.matshow(cmat, cmap='Reds')
    ax.set_xticks(range(len(CLASSES)))
    ax.set_xticklabels(CLASSES, fontdict={'fontsize': 7})
    plt.setp(ax.get_xticklabels(), rotation=45, ha="left", rotation_mode="anchor")
    ax.set_yticks(range(len(CLASSES)))
    ax.set_yticklabels(CLASSES, fontdict={'fontsize': 7})
    plt.setp(ax.get_yticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    titlestring = ""
    if score is not None:
        titlestring += 'f1 = {:.3f} '.format(score)
    if precision is not None:
        titlestring += ' precision = {:.3f} '.format(precision)
    if recall is not None:
        titlestring += ' recall = {:.3f} '.format(recall)
    if len(titlestring) > 0:
        ax.text(101, 1, titlestring,
                fontdict={'fontsize': 18, 'horizontalalignment': 'right',
                          'verticalalignment': 'top', 'color': '#804040'})
    plt.show()

def display_training_curves(training, validation, title, subplot):
    if subplot % 10 == 1:  # set up the subplots on the first call
        plt.subplots(figsize=(10, 10), facecolor='#F0F0F0')
        plt.tight_layout()
    ax = plt.subplot(subplot)
    ax.set_facecolor('#F8F8F8')
    ax.plot(training)
    ax.plot(validation)
    ax.set_title('model ' + title)
    ax.set_ylabel(title)
    ax.set_xlabel('epoch')
    ax.legend(['train', 'valid.'])
CIFAR-10 - Object Recognition in Images
13,799,686
params_to_update = []
update_params_name = ['fc.weight', 'fc.bias']
for name, param in net.named_parameters():
    if name in update_params_name:
        param.requires_grad = True
        params_to_update.append(param)
        print(name)
    else:
        param.requires_grad = False<train_model>
def to_float32(image, label):
    return tf.cast(image, tf.float32), label

def decode_image(image_data):
    image = tf.image.decode_jpeg(image_data, channels=3)
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.reshape(image, [*IMAGE_SIZE, 3])
    return image

def read_labeled_tfrecord(example):
    LABELED_TFREC_FORMAT = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "class": tf.io.FixedLenFeature([], tf.int64),
    }
    example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
    image = decode_image(example['image'])
    label = tf.cast(example['class'], tf.int32)
    return image, label

def read_unlabeled_tfrecord(example):
    UNLABELED_TFREC_FORMAT = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "id": tf.io.FixedLenFeature([], tf.string),
    }
    example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
    image = decode_image(example['image'])
    idnum = example['id']
    return image, idnum

def load_dataset(filenames, labeled=True, ordered=False):
    ignore_order = tf.data.Options()
    if not ordered:
        ignore_order.experimental_deterministic = False
    dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
    dataset = dataset.with_options(ignore_order)
    dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
    return dataset

def data_augment(image, label):
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_saturation(image, 0, 2)
    image = tf.clip_by_value(image, 0, 1)
    return image, label

def get_training_dataset():
    dataset = load_dataset(TRAINING_FILENAMES, labeled=True)
    dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
    dataset = dataset.repeat()
    dataset = dataset.shuffle(2048)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(AUTO)
    return dataset

def get_validation_dataset(ordered=False):
    dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.cache()
    dataset = dataset.prefetch(AUTO)
    return dataset

def get_test_dataset(ordered=False):
    dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(AUTO)
    return dataset

def count_data_items(filenames):
    # the item count is encoded in each tfrec filename
    n = [int(re.compile(r"-([0-9]*)\.").search(filename).group(1)) for filename in filenames]
    return np.sum(n)

NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES)
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(
    NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
CIFAR-10 - Object Recognition in Images
13,799,686
def lr_schedule(epoch):
    lr = 1e-3
    if epoch > 95:
        lr *= 0.5e-3
    elif epoch > 80:
        lr *= 1e-3
    elif epoch > 50:
        lr *= 1e-2
    elif epoch > 20:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr<choose_model_class>
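The schedule is a step decay off a 1e-3 base; printing it at the boundary epochs makes the plateaus explicit (a quick illustration, not part of the kernel):

# Illustration: the step-decay plateaus produced by lr_schedule above.
for epoch in [0, 21, 51, 81, 96]:
    lr_schedule(epoch)
# -> 1e-3, 1e-4, 1e-5, 1e-6, 5e-7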
print("Training data shapes:") for image, label in get_training_dataset().take(3): print(image.numpy().shape, label.numpy().shape) print("Training data label examples:", label.numpy()) print("Validation data shapes:") for image, label in get_validation_dataset().take(3): print(image.numpy().shape, label.numpy().shape) print("Validation data label examples:", label.numpy()) print("Test data shapes:") for image, idnum in get_test_dataset().take(3): print(image.numpy().shape, idnum.numpy().shape) print("Test data IDs:", idnum.numpy().astype('U'))
CIFAR-10 - Object Recognition in Images
13,799,686
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(params=params_to_update, lr=lr_schedule(0))<train_model>
training_dataset = get_training_dataset()
training_dataset = training_dataset.unbatch().batch(20)
train_batch = iter(training_dataset)
CIFAR-10 - Object Recognition in Images
13,799,686
def train_model(net, dataloader_dict, criterion, optimizer, num_epoch):
    since = time.time()
    best_model_wts = copy.deepcopy(net.state_dict())
    best_acc = 0.0
    net = net.to(device)
    for epoch in range(num_epoch):
        print('Epoch {}/{}'.format(epoch + 1, num_epoch))
        print('-' * 20)
        for phase in ['train', 'val']:
            if phase == 'train':
                net.train()
            else:
                net.eval()
            epoch_loss = 0.0
            epoch_corrects = 0
            for inputs, labels in tqdm(dataloader_dict[phase]):
                inputs = inputs.to(device)
                labels = labels.to(device)
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = net(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                    epoch_loss += loss.item() * inputs.size(0)
                    epoch_corrects += torch.sum(preds == labels.data)
            epoch_loss = epoch_loss / len(dataloader_dict[phase].dataset)
            epoch_acc = epoch_corrects.double() / len(dataloader_dict[phase].dataset)
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(net.state_dict())
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    net.load_state_dict(best_model_wts)
    return net<train_model>
display_batch_of_images(next(train_batch))
CIFAR-10 - Object Recognition in Images
13,799,686
num_epoch = 10
net = train_model(net, dataloader_dict, criterion, optimizer, num_epoch)<categorify>
validation_dataset = get_validation_dataset()
validation_dataset = validation_dataset.unbatch().batch(20)
validation_batch = iter(validation_dataset)
CIFAR-10 - Object Recognition in Images
13,799,686
id_list = []
pred_list = []
with torch.no_grad():
    for test_path in tqdm(test_list):
        img = Image.open(test_path)
        _id = int(test_path.split('/')[-1].split('.')[0])
        transform = ImageTransform(size, mean, std)
        img = transform(img, phase='val')
        img = img.unsqueeze(0)
        img = img.to(device)
        net.eval()
        outputs = net(img)
        preds = F.softmax(outputs, dim=1)[:, 1].tolist()
        id_list.append(_id)
        pred_list.append(preds[0])

res = pd.DataFrame({'id': id_list, 'label': pred_list})
res.sort_values(by='id', inplace=True)
res.reset_index(drop=True, inplace=True)
res.to_csv('submission.csv', index=False)<import_modules>
display_batch_of_images(next(validation_batch))
CIFAR-10 - Object Recognition in Images
13,799,686
<install_modules>
test_dataset = get_test_dataset()
test_dataset = test_dataset.unbatch().batch(20)
test_batch = iter(test_dataset)
CIFAR-10 - Object Recognition in Images
13,799,686
!pip install segmentation_models_pytorch
!git clone https://github.com/Bjarten/early-stopping-pytorch.git
!mv ./early-stopping-pytorch ./lib<import_modules>
display_batch_of_images(next(test_batch))
CIFAR-10 - Object Recognition in Images
13,799,686
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader, sampler
from pathlib import Path
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
import torchvision
from torchvision import transforms
import cv2
import re
import segmentation_models_pytorch as smp
from lib.pytorchtools import *<categorify>
with strategy.scope():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same',
                     kernel_regularizer=l2(0.001), input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3)))
    model.add(BatchNormalization())
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same',
                     kernel_regularizer=l2(0.001)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same',
                     kernel_regularizer=l2(0.001)))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same',
                     kernel_regularizer=l2(0.001)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.3))
    model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same',
                     kernel_regularizer=l2(0.001)))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same',
                     kernel_regularizer=l2(0.001)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.4))
    model.add(Flatten())
    model.add(Dense(128, activation='relu', kernel_initializer='he_uniform', kernel_regularizer=l2(0.001)))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
CIFAR-10 - Object Recognition in Images
13,799,686
class NerveDataset(Dataset):
    def __init__(self, directory, pytorch=True, is_test=False):
        super().__init__()
        self.files = []
        for file_name in directory.iterdir():
            if 'mask' in str(file_name):
                continue
            self.files.append(self.combine_files(file_name))
        self.files = sorted(self.files, key=lambda file: int(re.search(r'\d+', str(file['image'])).group(0)))
        self.pytorch = pytorch
        self.resize = torchvision.transforms.Resize((224, 224), interpolation=Image.NEAREST)
        self.is_test = is_test

    def combine_files(self, file_name: Path):
        # e.g. '../input/.../train/1_1.tif' -> '../input/.../train/1_1_mask.tif'
        files = {
            'image': file_name,
            'mask': '..' + str(file_name).split('.')[2] + '_mask.tif',
        }
        return files

    def __len__(self):
        return len(self.files)

    def open_as_array(self, idx, invert=False):
        raw_image = self.resize(Image.open(self.files[idx]['image']))
        raw_image = np.stack([np.array(raw_image)], axis=2)
        if invert:
            raw_image = raw_image.transpose((2, 0, 1))
        return (raw_image / np.iinfo(raw_image.dtype).max)

    def open_mask(self, idx, add_dims=False):
        raw_mask = self.resize(Image.open(self.files[idx]['mask']))
        raw_mask = np.array(raw_mask)
        raw_mask = np.where(raw_mask == 255, 1, 0)
        return np.expand_dims(raw_mask, 0) if add_dims else raw_mask

    def __getitem__(self, idx):
        x = torch.tensor(self.open_as_array(idx, invert=self.pytorch), dtype=torch.float32)
        if not self.is_test:
            y = torch.tensor(self.open_mask(idx, add_dims=True), dtype=torch.float32)
            return x, y
        return x

    def open_as_pil(self, idx):
        arr = 256 * self.open_as_array(idx)
        return Image.fromarray(arr.astype(np.uint8), 'L')

    def __repr__(self):
        s = 'Dataset class with {} files'.format(self.__len__())
        return s<create_dataframe>
training_dataset = get_training_dataset().map(to_float32)
validation_dataset = get_validation_dataset().map(to_float32)
checkpoint = ModelCheckpoint("model_checkpoint.h5", monitor='val_loss', mode='min', save_best_only=True, verbose=1)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1, restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, verbose=1, min_delta=0.0001)
callbacks = [early_stopping, reduce_lr]
history = model.fit(training_dataset, steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS,
                    validation_data=validation_dataset, callbacks=callbacks)
CIFAR-10 - Object Recognition in Images
13,799,686
images_path = Path('../input/ultrasound-nerve-segmentation/train')
data = NerveDataset(images_path)
len(data)<train_model>
cmdataset = get_validation_dataset(ordered=True)
cmdataset = cmdataset.map(to_float32)
images_ds = cmdataset.map(lambda image, label: image)
labels_ds = cmdataset.map(lambda image, label: label).unbatch()
cm_correct_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES))).numpy()
cm_probabilities = model.predict(images_ds)
cm_predictions = np.argmax(cm_probabilities, axis=-1)
print("Correct labels: ", cm_correct_labels.shape, cm_correct_labels)
print("Predicted labels: ", cm_predictions.shape, cm_predictions)
CIFAR-10 - Object Recognition in Images
13,799,686
split_rate = 0.7
train_ds_len = int(len(data) * split_rate)
valid_ds_len = len(data) - train_ds_len
train_ds, valid_ds = torch.utils.data.random_split(data, (train_ds_len, valid_ds_len))
print(f'Train dataset length: {len(train_ds)}')
print(f'Validation dataset length: {len(valid_ds)}')
print(f'All data length: {len(data)}')<choose_model_class>
cmat = confusion_matrix(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)))
score = f1_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)), average='macro')
precision = precision_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)), average='macro')
recall = recall_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)), average='macro')
cmat = (cmat.T / cmat.sum(axis=1)).T  # normalize each row
display_confusion_matrix(cmat, score, precision, recall)
print('f1 score: {:.3f}, precision: {:.3f}, recall: {:.3f}'.format(score, precision, recall))
CIFAR-10 - Object Recognition in Images
13,799,686
unet = model = smp.Unet(
    encoder_name="resnet34",
    encoder_weights="imagenet",
    in_channels=1,
    classes=1,
    activation="sigmoid"
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
learning_rate = 0.001
epochs = 50
metrics = [smp.utils.metrics.IoU()]
loss_function = smp.utils.losses.DiceLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs)
stopper = EarlyStopping(patience=3)
train_epoch = smp.utils.train.TrainEpoch(model, loss=loss_function, optimizer=optimizer,
                                         metrics=metrics, device=device, verbose=True)
val_epoch = smp.utils.train.ValidEpoch(model, loss=loss_function, metrics=metrics,
                                       device=device, verbose=True)
train_dl = DataLoader(train_ds, batch_size=16, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=16, shuffle=True)<create_dataframe>
test_ds = get_test_dataset(ordered=True)
test_ds = test_ds.map(to_float32)
print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities = model.predict(test_images_ds)
predictions = np.argmax(probabilities, axis=-1)
print(predictions)
label_map = np.vectorize(lambda index: CLASSES[int(index)])
predictions_with_label = label_map(predictions)
print(predictions_with_label)
print('Generating submission.csv file...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions_with_label]),
           fmt=['%s', '%s'], delimiter=',', header='id,label', comments='')
!head submission.csv
CIFAR-10 - Object Recognition in Images
13,799,686
test_images_path = Path('../input/ultrasound-nerve-segmentation/test')
test_data = NerveDataset(test_images_path, is_test=True)
test_dl = DataLoader(test_data, batch_size=1, shuffle=False)
len(test_data)<categorify>
dataset = get_validation_dataset()
dataset = dataset.unbatch().batch(20)
batch = iter(dataset)
CIFAR-10 - Object Recognition in Images
13,799,686
def rle_encoding(x):
    # Run-length encode a binary mask, scanning column-major (x.T), 1-indexed.
    dots = np.where(x.T.flatten() == 1)[0]
    run_lengths = []
    prev = -2
    for b in dots:
        if (b > prev + 1):
            run_lengths.extend((b + 1, 0))
        run_lengths[-1] += 1
        prev = b
    return run_lengths<categorify>
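The encoder emits (start, length) pairs over the column-major flattening with 1-based pixel indices, the format these segmentation competitions expect. A minimal check (not from the kernel):

# Illustration: RLE of a tiny 2x2 mask.
import numpy as np

mask = np.array([[0, 1],
                 [1, 1]], dtype=np.uint8)
print(rle_encoding(mask))  # [2, 3] -> one run starting at pixel 2, length 3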
images, labels = next(batch)
probabilities = model.predict(tf.cast(images, tf.float32))
predictions = np.argmax(probabilities, axis=-1)
display_batch_of_images((images, labels), predictions)
CIFAR-10 - Object Recognition in Images
66,907
encodings = []
counter = 0
for image in test_dl:
    if torch.cuda.is_available():
        image = image.cuda()
    pr_mask = unet(image)
    pr_mask = pr_mask[0]
    pr_mask = pr_mask.squeeze().cpu().detach().numpy().round().astype(np.uint8)
    pr_mask = cv2.resize(pr_mask, (580, 420), interpolation=cv2.INTER_CUBIC)
    encodings.append(rle_encoding(pr_mask))
    print(f'Image: {counter}')
    counter += 1<save_to_csv>
print(check_output(["cp", ".. /input/sampleSubmission.csv", "sub.csv"] ).decode("utf8"))
Painter by Numbers
3,352,071
df_submission = pd.DataFrame(columns=["img", "pixels"]) for i, encoding in enumerate(encodings): pixels = ' '.join(map(str, encoding)) df_submission.loc[i] = [str(i+1), pixels] df_submission.to_csv('./submission.csv', index=False) print('Done!' )<install_modules>
!tar xf ../input/bird-songs-pad-and-resize-spectrogram/spectrograms_resized.tar.bz2
Multi-label Bird Species Classification - NIPS 2013
3,352,071
!pip install git+https://github.com/qubvel/segmentation_models.pytorch<load_from_zip>
%reload_ext autoreload
%autoreload 2
%matplotlib inline
Multi-label Bird Species Classification - NIPS 2013
3,352,071
!git clone https://github.com/Bjarten/early-stopping-pytorch.git esp<import_modules>
data_dir = Path('../input')
label_dir = data_dir/'multilabel-bird-species-classification-nips2013/nips4b_bird_challenge_train_labels/NIPS4B_BIRD_CHALLENGE_TRAIN_LABELS'
spect_dir = Path('./spectrograms_resized')
Multi-label Bird Species Classification - NIPS 2013
3,352,071
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
import torchvision
from torchvision import transforms
import pandas as pd
import numpy as np
import os
from PIL import Image
import segmentation_models_pytorch as smp
from esp.pytorchtools import EarlyStopping
import matplotlib.pyplot as plt<train_model>
df = pd.read_csv(label_dir/'nips4b_birdchallenge_train_labels.csv', skiprows=[0, 2])
df.tail()
Multi-label Bird Species Classification - NIPS 2013
3,352,071
def save_checkpoint(self, val_loss, model):
    # Replacement for EarlyStopping.save_checkpoint that pickles the whole model.
    if self.verbose:
        self.trace_func(f'Validation loss decreased({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model...')
    with open(self.path, 'wb') as f:
        pickle.dump(model, f)
    self.val_loss_min = val_loss<drop_column>
df.drop(df.columns[[1, 3]], axis=1, inplace=True)
df.rename(columns={df.columns[0]: 'file', df.columns[1]: 'EMPTY'}, inplace=True)
df = df[:-1]
df.fillna(0, inplace=True)
df = df.astype('int32', errors='ignore')
df['file'] = df['file'].apply(lambda fn: str(Path(fn).with_suffix('')))
df.tail()
Multi-label Bird Species Classification - NIPS 2013
3,352,071
EarlyStopping.save_checkpoint = save_checkpoint<define_variables>
tfms = get_transforms(do_flip=False, max_rotate=None, max_warp=None)
data = (src.transform(tfms, size=128)
        .databunch(num_workers=0).normalize(imagenet_stats))
Multi-label Bird Species Classification - NIPS 2013
3,352,071
input_ = '../input/ultrasound-nerve-segmentation'
train_path = f'{input_}/train'
test_path = f'{input_}/test'
train_csv_path = 'train_annotation.csv'<save_to_csv>
data.show_batch(rows=3, figsize=(12, 9), ds_type=DatasetType.Valid)
Multi-label Bird Species Classification - NIPS 2013
3,352,071
def create_csv(data_path, out_csv_path, key_word='mask'):
    # Pairs each mask file with its image by stripping the key word from the name;
    # writes the row built from the most recently seen mask file.
    to_delete = f'_{key_word}'
    for file_name in os.listdir(data_path):
        if key_word in file_name:
            img = file_name.replace(to_delete, '')
            data = pd.DataFrame([img], index=['img']).transpose()
            data.insert(0, 'mask', file_name)
        else:
            if not os.path.exists(out_csv_path):
                data.to_csv(out_csv_path, header=True, index=False)
            else:
                data.to_csv(out_csv_path, mode='a', header=False, index=False)<save_to_csv>
arch = models.resnet50
acc_02 = partial(accuracy_thresh, thresh=0.2)
learn = cnn_learner(data, arch, metrics=acc_02, path='.')
Multi-label Bird Species Classification - NIPS 2013
3,352,071
create_csv(data_path=train_path, out_csv_path=train_csv_path)<categorify>
lr = 2.29E-02
learn.fit_one_cycle(5, slice(lr))
Multi-label Bird Species Classification - NIPS 2013
3,352,071
class ImageDataset(Dataset):
    def __init__(self, df, root_dir, transform=None):
        self.df = df
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        mask = Image.open(os.path.join(self.root_dir, self.df.iloc[idx, 0]))
        image = Image.open(os.path.join(self.root_dir, self.df.iloc[idx, 1]))
        if self.transform:
            return self.transform(image), self.transform(mask)
        return image, mask<load_from_csv>
learn.save('stage-1-rn50', return_path=True)
Multi-label Bird Species Classification - NIPS 2013
3,352,071
train_df = pd.read_csv(train_csv_path)<create_dataframe>
learn.fit_one_cycle(5, slice(3.02E-03, lr/5))
Multi-label Bird Species Classification - NIPS 2013
3,352,071
train_samples = ImageDataset(df=train_df, root_dir=train_path)<define_variables>
learn.save('stage-2-rn50')
Multi-label Bird Species Classification - NIPS 2013
3,352,071
ENCODER = 'vgg11_bn'
ENCODER_WEIGHTS = 'imagenet'
ACTIVATION = 'sigmoid'
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')<categorify>
data_256 = (src.transform(tfms, size=256)
            .databunch(num_workers=0).normalize(imagenet_stats))
learn.data = data_256
learn.data.train_ds[0][0].shape
Multi-label Bird Species Classification - NIPS 2013
3,352,071
model = smp.Unet(
    encoder_name=ENCODER,
    encoder_weights=ENCODER_WEIGHTS,
    in_channels=1,
    classes=1,
    activation=ACTIVATION
)<choose_model_class>
lr = 5E-03
learn.fit_one_cycle(5, slice(lr))
Multi-label Bird Species Classification - NIPS 2013
3,352,071
loss = smp.utils.losses.DiceLoss()
metrics = [smp.utils.metrics.IoU()]
optimizer = torch.optim.Adam
scheduler = lr_scheduler.StepLR<categorify>
learn.save('stage-1-256-rn50')
Multi-label Bird Species Classification - NIPS 2013
3,352,071
my_transforms = transforms.Compose([
    transforms.Resize(size=(224, 224)),
    transforms.ToTensor()
])<split>
learn.fit_one_cycle(5, slice(1.58E-06, lr/5))
Multi-label Bird Species Classification - NIPS 2013
3,352,071
def split_df(df, fraction=0.8):
    df_1 = df.sample(frac=fraction)
    return df_1, df.drop(df_1.index)<train_model>
learn.save('stage-2-256-rn50')
Multi-label Bird Species Classification - NIPS 2013
3,352,071
def train(model, train_df, train_dir, optimizer, loss, metrics, learning_rate=0.01,
          batch_size=20, epochs=10, patience=3, scheduler=None, step_size=5,
          gamma=0.1, device='cpu', transform=None):
    early_stopping = EarlyStopping(patience, path='best_model.pkl', verbose=True)
    optimizer = optimizer(model.parameters(), learning_rate)
    if scheduler:
        scheduler = scheduler(optimizer, step_size, gamma)
    train_epoch = smp.utils.train.TrainEpoch(model, loss, metrics, optimizer, device, verbose=True)
    valid_epoch = smp.utils.train.ValidEpoch(model, loss, metrics, device, verbose=True)
    train_logs, valid_logs = [], []
    for epoch in range(epochs):
        train_dataframe, val_dataframe = split_df(train_df)
        train_dataset = ImageDataset(train_dataframe, train_dir, transform=transform)
        valid_dataset = ImageDataset(val_dataframe, train_dir, transform=transform)
        train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
        # validation loader uses the held-out split
        valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=False)
        print(f'Epoch: {epoch + 1}/{epochs}')
        train_log = train_epoch.run(train_loader)
        valid_log = valid_epoch.run(valid_loader)
        train_logs.append(train_log)
        valid_logs.append(valid_log)
        early_stopping(valid_log[loss.__name__], model)
        if early_stopping.early_stop:
            print("Early stopping")
            break
        if scheduler:
            scheduler.step()
    return train_logs, valid_logs<train_model>
learn.fit_one_cycle(5, slice(1.58E-06, lr/5))
Multi-label Bird Species Classification - NIPS 2013
3,352,071
res = train(model=model, train_df=train_df, train_dir=train_path, optimizer=optimizer,
            loss=loss, learning_rate=0.01, metrics=metrics, batch_size=20, epochs=20,
            scheduler=scheduler, step_size=10, patience=3, device=DEVICE,
            transform=my_transforms)<import_modules>
learn.save('stage-2-256-rn50-10e')
Multi-label Bird Species Classification - NIPS 2013
3,352,071
import pickle<save_to_csv>
test = ImageList.from_folder(spect_dir/'test/')
len(test)
Multi-label Bird Species Classification - NIPS 2013
3,352,071
with open('results.pkl', 'wb') as f:
    pickle.dump(res, f)<load_pretrained>
predictor = load_learner('.', test=test, num_workers=0)
preds, _ = predictor.get_preds(ds_type=DatasetType.Test)
fpreds = preds[:, 1:].reshape(-1)
Multi-label Bird Species Classification - NIPS 2013
3,352,071
with open('best_model.pkl', 'rb') as f:
    best_model = pickle.load(f)<create_dataframe>
names = [f.stem for f in predictor.data.test_ds.items]
fnames = [x + '.wav_classnumber_' + str(i) for x in names for i in range(1, len(data.classes))]
Multi-label Bird Species Classification - NIPS 2013
3,352,071
<categorify><EOS>
test_df = pd.DataFrame({'ID': fnames, 'Probability': fpreds}, columns=['ID', 'Probability'])
test_df.to_csv('submission.csv', index=False)
Multi-label Bird Species Classification - NIPS 2013
3,334,951
<SOS> metric: AUC Kaggle data source: influencers-in-social-networks<import_modules>
warnings.simplefilter(action='ignore', category=FutureWarning)
gc.enable()
Influencers in Social Networks
3,334,951
from tqdm.notebook import tqdm<sort_values>
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
Influencers in Social Networks
3,334,951
imgs = [f for f in os.listdir(test_path)]
imgs = sorted(imgs, key=lambda s: int(s.split('.')[0]))<save_to_csv>
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
Influencers in Social Networks
3,334,951
def create_csv_submission(model, data_path, img_list, out_path):
    submission_df = pd.DataFrame(columns=['img', 'pixels'])
    model.to(DEVICE)
    model.eval()
    for i, img in enumerate(tqdm(img_list)):
        x = Image.open(os.path.join(data_path, img))
        x = my_transforms(x)
        x = x.unsqueeze(0).to(DEVICE)
        pred_mask = model.predict(x)
        pred_mask = pred_mask.cpu()
        pred_mask = transforms.Resize(size=(420, 580))(pred_mask)
        encoding = rle_encoding(pred_mask)
        pixels = ' '.join(map(str, encoding))
        submission_df.loc[i] = [str(i + 1), pixels]
    submission_df.to_csv(out_path, index=False)<save_to_csv>
def pre_pro(df):
    # Replace +/-inf in each column with that column's finite max/min, then fill NaN.
    df = df.astype('float32')
    col = df.columns
    for i in range(len(col)):
        m = df.loc[df[col[i]] != -np.inf, col[i]].min()
        df[col[i]].replace(-np.inf, m, inplace=True)
        M = df.loc[df[col[i]] != np.inf, col[i]].max()
        df[col[i]].replace(np.inf, M, inplace=True)
    df.fillna(0, inplace=True)
    return df
Influencers in Social Networks
3,334,951
create_csv_submission(model=model, data_path=test_path, img_list=imgs, out_path='submission.csv')<load_from_csv>
def feat_eng(df):
    df = df.replace(0, 0.001)  # avoid zero denominators in the ratio features
    df['follower_diff'] = (df['A_follower_count'] > df['B_follower_count'])
    df['following_diff'] = (df['A_following_count'] > df['B_following_count'])
    df['listed_diff'] = (df['A_listed_count'] > df['B_listed_count'])
    df['ment_rec_diff'] = (df['A_mentions_received'] > df['B_mentions_received'])
    df['rt_rec_diff'] = (df['A_retweets_received'] > df['B_retweets_received'])
    df['ment_sent_diff'] = (df['A_mentions_sent'] > df['B_mentions_sent'])
    df['rt_sent_diff'] = (df['A_retweets_sent'] > df['B_retweets_sent'])
    df['posts_diff'] = (df['A_posts'] > df['B_posts'])
    df['A_pop_ratio'] = df['A_mentions_sent'] / df['A_listed_count']
    df['A_foll_ratio'] = df['A_follower_count'] / df['A_following_count']
    df['A_ment_ratio'] = df['A_mentions_sent'] / df['A_mentions_received']
    df['A_rt_ratio'] = df['A_retweets_sent'] / df['A_retweets_received']
    df['B_pop_ratio'] = df['B_mentions_sent'] / df['B_listed_count']
    df['B_foll_ratio'] = df['B_follower_count'] / df['B_following_count']
    df['B_ment_ratio'] = df['B_mentions_sent'] / df['B_mentions_received']
    df['B_rt_ratio'] = df['B_retweets_sent'] / df['B_retweets_received']
    df['A/B_foll_ratio'] = (df['A_foll_ratio'] > df['B_foll_ratio'])
    df['A/B_ment_ratio'] = (df['A_ment_ratio'] > df['B_ment_ratio'])
    df['A/B_rt_ratio'] = (df['A_rt_ratio'] > df['B_rt_ratio'])
    df['nf1_diff'] = (df['A_network_feature_1'] > df['B_network_feature_1'])
    df['nf2_diff'] = (df['A_network_feature_2'] > df['B_network_feature_2'])
    df['nf3_diff'] = (df['A_network_feature_3'] > df['B_network_feature_3'])
    df['nf3_ratio'] = df['A_network_feature_3'] / df['B_network_feature_3']
    df['nf2_ratio'] = df['A_network_feature_2'] / df['B_network_feature_2']
    df['nf1_ratio'] = df['A_network_feature_1'] / df['B_network_feature_1']
    return (pre_pro(df))
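Each engineered column compares influencer A against influencer B, either as a boolean "A beats B" flag or as an A/B ratio. A tiny illustration of the pattern on made-up data (only a couple of the real columns):

# Illustration only: the boolean/ratio feature pattern used by feat_eng.
import pandas as pd

toy = pd.DataFrame({'A_follower_count': [1000, 50],
                    'B_follower_count': [500, 200]})
toy['follower_diff'] = toy['A_follower_count'] > toy['B_follower_count']
toy['foll_ratio'] = toy['A_follower_count'] / toy['B_follower_count']
print(toy)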
Influencers in Social Networks
3,334,951
child = pd.read_csv('../input/child_wishlist_v2.csv', header=None, index_col=0)
childArray = child.values<load_from_csv>
fe_train = feat_eng(train.copy())
fe_test = feat_eng(test.copy())
Influencers in Social Networks
3,334,951
gift = pd.read_csv('../input/gift_goodkids_v2.csv', header=None, index_col=0)
giftArray = gift.values<load_from_csv>
train_df = fe_train
test_df = fe_test
y_train = np.array(train_df['Choice'])
Influencers in Social Networks
3,334,951
submitReader = pd.read_csv('../input/sample_submission_random_v2.csv')
submit = submitReader<compute_test_metric>
target = 'Choice'
predictors = train_df.columns.values.tolist()[1:]
Influencers in Social Networks
3,334,951
def childListPosition(whichChild, whichGift):
    temp = np.argwhere(childArray[whichChild] == whichGift)
    if temp.size > 0:
        return temp[0][0] + 1
    else:
        return 0

def giftListPosition(whichGift, whichChild):
    temp = np.argwhere(giftArray[whichGift] == whichChild)
    if temp.size > 0:
        return temp[0][0] + 1
    else:
        return 0

def childScore(whichChild, whichGift):
    position = childListPosition(whichChild, whichGift)
    if position > 0:
        return (100 - position - 1) * 2
    else:
        return -1

def giftScore(whichGift, whichChild):
    # look the child up in the gift's good-kids list
    position = giftListPosition(whichGift, whichChild)
    if position > 0:
        return (1000 - position - 1) * 2
    else:
        return -1

def score(whichChild, whichGift):
    tempa = childScore(whichChild, whichGift)
    tempb = giftScore(whichGift, whichChild)
    return tempa * 1 + tempb * 1<normalization>
param_lgb = {
    'feature_fraction': 0.4647875434283183,
    'lambda_l1': 0.14487098904632512,
    'lambda_l2': 0.9546002933329684,
    'learning_rate': 0.050592093295320606,
    'max_depth': int(round(7.696194993998026)),
    'min_data_in_leaf': int(round(9.879507661608065)),
    'min_gain_to_split': 0.7998292013880356,
    'min_sum_hessian_in_leaf': 0.24962103361366683,
    'num_leaves': int(round(2.854239951949671)),
    'max_bin': 63,
    'bagging_fraction': 1.0,
    'bagging_freq': 5,
    'save_binary': True,
    'seed': 1965,
    'feature_fraction_seed': 1965,
    'bagging_seed': 1965,
    'drop_seed': 1965,
    'data_random_seed': 1965,
    'objective': 'binary',
    'boosting_type': 'gbdt',
    'verbose': 1,
    'metric': 'auc',
    'is_unbalance': True,
    'boost_from_average': False
}
Influencers in Social Networks
3,334,951
def changePosition(positionA, positionB):
    # swap the gift assignments of two children
    tempa = submit.iloc[positionA, 1]
    tempb = submit.iloc[positionB, 1]
    submit.iloc[positionA, 1] = tempb
    submit.iloc[positionB, 1] = tempa
    return<compute_test_metric>
nfold = 20
skf = StratifiedKFold(n_splits=nfold, shuffle=True, random_state=2019)
oof = np.zeros(len(fe_train))
predictions = np.zeros((len(fe_test), nfold))
i = 1
for train_index, valid_index in skf.split(fe_train, fe_train.Choice.values):
    print("fold {}".format(i))
    xg_train = lgb.Dataset(fe_train.iloc[train_index][predictors].values,
                           label=fe_train.iloc[train_index][target].values,
                           feature_name=predictors,
                           free_raw_data=False)
    xg_valid = lgb.Dataset(fe_train.iloc[valid_index][predictors].values,
                           label=fe_train.iloc[valid_index][target].values,
                           feature_name=predictors,
                           free_raw_data=False)
    clf = lgb.train(param_lgb, xg_train, 10000000, valid_sets=[xg_valid],
                    verbose_eval=250, early_stopping_rounds=100)
    oof[valid_index] = clf.predict(fe_train.iloc[valid_index][predictors].values, num_iteration=clf.best_iteration)
    predictions[:, i - 1] += clf.predict(fe_test[predictors], num_iteration=clf.best_iteration)
    i = i + 1
print("CV AUC: {:<0.8f}".format(metrics.roc_auc_score(fe_train.Choice.values, oof)))
Influencers in Social Networks
3,334,951
def currentScore(childA, childB):
    giftA = submit.iloc[childA, 1]
    giftB = submit.iloc[childB, 1]
    scoreA = score(childA, giftA)
    scoreB = score(childB, giftB)
    return scoreA + scoreB

def predictScore(childA, childB):
    # score if the two children swapped gifts
    giftA = submit.iloc[childA, 1]
    giftB = submit.iloc[childB, 1]
    scoreA = score(childA, giftB)
    scoreB = score(childB, giftA)
    return scoreA + scoreB<compute_test_metric>
lgb_bay = []
for i in range(len(predictions)):
    lgb_bay.append(predictions[i][-1])
Influencers in Social Networks
3,334,951
def agent(childA, childB):
    tempA = currentScore(childA, childB)
    tempB = predictScore(childA, childB)
    if tempA > tempB:
        return 0
    elif tempA == tempB:
        changePosition(childA, childB)
    elif tempA < tempB:
        changePosition(childA, childB)
        print('childA:' + str(childA)
              + ' childB:' + str(childB)
              + ' currentScore:' + str(tempA)
              + ' predictScore:' + str(tempB)
              + ' improved:' + str(tempB - tempA))
    else:
        return 1<save_to_csv>
submission = pd.read_csv('../input/sample_predictions.csv')
submission['Choice'] = lgb_bay
submission.to_csv('sub.csv', index=False, header=True)
Influencers in Social Networks