kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
18,990,193
combined[combined['Age'].notnull() ].groupby(['Pclass','Sex','AgeGroup'])['Age'].mean()<categorify>
Digit Recognizer
18,990,193
def Age(cols): Age=cols[0] Pclass=cols[1] Sex=cols[2] AgeGroup=cols[3] if pd.isnull(Age): if Pclass==1: if Sex=="male": if AgeGroup=='adult': return 42 else: return 7 elif Sex=="female": if AgeGroup=='adult': return 37 else: return 8 elif Pclass==2: if Sex=="male": if AgeGroup=='adult': return 33 else: return 4 elif Sex=="female": if AgeGroup=='adult': return 31 else: return 7 elif Pclass==3: if Sex=="male": if AgeGroup=='adult': return 29 else: return 7 elif Sex=="female": if AgeGroup=='adult': return 27 else: return 5 else: return Age combined["Age"]=combined[["Age","Pclass","Sex","AgeGroup"]].apply(Age,axis=1 )<categorify>
Digit Recognizer
18,990,193
def AgeBand(col): Age=col[0] if Age <=7: return "0-7" elif Age <=14: return "8-14" elif Age <=21: return "15-21" elif Age <= 28: return "22-28" elif Age <= 35: return "29-35" elif Age <= 42: return "36-42" elif Age <= 49: return "43-49" elif Age <= 56: return "50-56" elif Age <= 63: return "57-63" else: return ">=64" combined["AgeBand"]=combined[["Age"]].apply(AgeBand,axis=1 )<groupby>
Digit Recognizer
18,990,193
combined.groupby(['Pclass','Embarked'])['PassengerId'].count()<feature_engineering>
Digit Recognizer
18,990,193
combined[combined['Embarked'].isnull() ]['Embarked'] = combined['Embarked'].mode()<merge>
epochs_num = 40 batch_size = 128 validation_split_part = 0.2 model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) history = model.fit(train_image, train_label, epochs=epochs_num, batch_size=batch_size, validation_split=validation_split_part, shuffle=True )
Digit Recognizer
18,990,193
ticketCount = combined.groupby('Ticket')['PassengerId'].count().reset_index() ticketCount.rename(columns={'PassengerId':'Count on Ticket'},inplace=True) combined = combined.merge(ticketCount, on="Ticket",how="left" )<feature_engineering>
model.save('Digit_Recognition_CNN.model' )
Digit Recognizer
18,990,193
combined['Diff'] = combined['FamilySize'] - combined['Count on Ticket'] combined['Family Status'] = combined.apply(lambda x:"Has Family On Same Ticket" if(x['FamilySize'] - x['Count on Ticket'])<= 0 else "Family Not on same ticket",axis=1) <feature_engineering>
predictions = model.predict(test_image )
Digit Recognizer
18,990,193
combined['Family Status'] = combined.apply(lambda x:"Is Alone" if(x['FamilySize']==1)&(x['Count on Ticket']==1)else x['Family Status'],axis=1 )<feature_engineering>
predictions = np.argmax(predictions, axis = 1 )
Digit Recognizer
18,990,193
combined['Cabin Class'] = 'No Cabin' combined['Cabin Class'] = combined.apply(lambda x: "No Cabin" if pd.isna(x["Cabin"])else x["Cabin"][0] , axis=1) <merge>
submission = pd.DataFrame({'ImageId' : range(1,28001), 'Label' : predictions} )
Digit Recognizer
18,990,193
<feature_engineering><EOS>
submission.to_csv("submission.csv",index=False )
Digit Recognizer
17,531,578
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<merge>
%matplotlib inline
Digit Recognizer
17,531,578
companion = pd.pivot_table(combined, values='PassengerId',index=['Ticket'],columns=['AgeGroup'], aggfunc="count" ).reset_index().fillna(0) companion.columns = ['Ticket','No.of Adult Companion', 'No.of Child Companion'] combined = combined.merge(companion, on='Ticket',how='left' )<feature_engineering>
train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') train_df.head()
Digit Recognizer
17,531,578
combined.loc[combined['AgeGroup']=='adult','No.of Adult Companion'] = combined.loc[combined['AgeGroup']=='adult','No.of Adult Companion'] - 1 combined.loc[combined['AgeGroup']=='child','No.of Child Companion'] = combined.loc[combined['AgeGroup']=='child','No.of Child Companion'] - 1 combined['Companion'] = 'Adult & Child Companion' combined['Companion'] = combined.apply(lambda x:'Only Adult Companion' if(x['No.of Adult Companion'] > 0)&(x['No.of Child Companion']==0)else x['Companion'],axis=1) combined['Companion'] = combined.apply(lambda x:'Only Child Companion' if(x['No.of Adult Companion'] == 0)&(x['No.of Child Companion']>0)else x['Companion'],axis=1) combined['Companion'] = combined.apply(lambda x:'No Companion' if(x['No.of Adult Companion'] == 0)&(x['No.of Child Companion']==0)else x['Companion'],axis=1 )<groupby>
X = train_df.drop('label', axis=1 ).values.reshape(( -1, 28, 28, 1)) / 255 y = utils.to_categorical(train_df['label'], 10) print(X.shape, y.shape )
Digit Recognizer
17,531,578
combined[combined["Survived"].notnull() ].groupby(['AgeGroup','Companion'])['PassengerId'].count()<groupby>
train_datagen = ImageDataGenerator( rotation_range=8, shear_range=0.3, zoom_range=0.08, width_shift_range=0.08, height_shift_range=0.08, validation_split=0.1) train_datagen.fit(X )
Digit Recognizer
17,531,578
combined[combined["Survived"].notnull() ].groupby(['AgeGroup','Companion'])['Survived'].mean()<drop_column>
keras.backend.clear_session() model = keras.Sequential( [ keras.Input(shape=(28, 28, 1)) , layers.Conv2D(32, kernel_size=5, padding='same', kernel_regularizer=regularizers.l2(5e-4), activation='relu'), layers.BatchNormalization() , layers.Conv2D(32, kernel_size=5, padding='same', kernel_regularizer=regularizers.l2(5e-4), activation='relu'), layers.BatchNormalization() , layers.MaxPool2D(pool_size=(2,2)) , layers.Conv2D(64, kernel_size=3, kernel_regularizer=regularizers.l2(5e-4), activation='relu'), layers.BatchNormalization() , layers.Conv2D(64, kernel_size=3, kernel_regularizer=regularizers.l2(5e-4), activation='relu'), layers.BatchNormalization() , layers.MaxPool2D(pool_size=(2,2), strides=(2,2)) , layers.Conv2D(128, kernel_size=3, kernel_regularizer=regularizers.l2(5e-4), activation='relu'), layers.BatchNormalization() , layers.Flatten() , layers.Dense(256, activation='relu'), layers.BatchNormalization() , layers.Dense(10, activation='softmax'), ] ) model.summary()
Digit Recognizer
17,531,578
train_copy = combined[:train_len] test_copy = combined[train_len:].reset_index(drop=True) test_copy.drop(columns=['Survived'],inplace=True )<drop_column>
stopping = keras.callbacks.EarlyStopping(patience=2, monitor ='val_accuracy') optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-07) callbacks = [stopping]
Digit Recognizer
17,531,578
combined.drop(columns=['PassengerId','Name','Age', 'AgeGroup','SibSp','Parch','Ticket','Cabin','Count on Ticket','Diff','No.of Adult Companion','No.of Child Companion'],inplace=True )<categorify>
batch_size = 84 epochs = 100 model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['acc']) history = model.fit( train_datagen.flow(X, y, subset='training'), validation_data=train_datagen.flow(X, y, batch_size=batch_size, subset='validation'), epochs=epochs, steps_per_epoch = X.shape[0] // batch_size, batch_size=batch_size, callbacks=callbacks, use_multiprocessing=True )
Digit Recognizer
17,531,578
combined = pd.get_dummies(combined, columns = ["Sex","Embarked","AgeBand","Family Status","Cabin Class","Companion"],drop_first=True )<drop_column>
submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv') test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
Digit Recognizer
17,531,578
train = combined[:train_len] test = combined[train_len:] test.drop(columns=['Survived'],inplace=True )<split>
X_test = test_df.values.reshape(( -1, 28, 28, 1)) / 255 y_hat = model.predict(X_test) y_class = y_hat.argmax(axis=-1) submission['Label'] = y_class
Digit Recognizer
17,531,578
<compute_train_metric><EOS>
submission.to_csv('output.csv', index=False) display(submission.tail() )
Digit Recognizer
17,862,039
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<compute_test_metric>
import torch import torchvision from torch import nn from torchvision.transforms import transforms import pandas as pd import matplotlib.pyplot as plt from PIL import Image from sklearn.model_selection import train_test_split from torch import nn from torch import optim import torch.nn.functional as F import numpy as np from torch.utils.data import DataLoader, Dataset from torchvision import datasets, transforms, models from torchvision.datasets import ImageFolder from torch.autograd import Variable
Digit Recognizer
17,862,039
report=classification_report(y_test,pred) print("Decision Tree report ",report )<train_model>
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print(device )
Digit Recognizer
17,862,039
rfc=ensemble.RandomForestClassifier(max_depth=6,random_state=0,n_estimators=64) rfc.fit(X_train, y_train) pred_train = rfc.predict(X_train) pred=rfc.predict(X_test) pred_train_df=pd.DataFrame({"Actual":y_train,"Pred":pred_train}) pred_df=pd.DataFrame({"Actual":y_test,"Pred":pred}) cm=confusion_matrix(y_test,pred) print(cm) report=classification_report(y_test,pred) print("Random Forest report ",report )<save_to_csv>
train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
Digit Recognizer
17,862,039
y_test_rfc = rfc.predict(test ).astype(int) test_out = pd.concat([test_copy['PassengerId'],pd.Series(y_test_rfc,name="Survived")],axis=1) test_out['Survived'] = test_out['Survived'].astype('int') test_out.to_csv('submission.csv',index=False )<install_modules>
x = train_df.iloc[:,1:].values y = train_df.iloc[:,0].values z = test_df.iloc[::].values print("X shape : {}".format(x.shape)) print("Y shape : {}".format(y.shape)) print("Z shape : {}".format(z.shape))
Digit Recognizer
17,862,039
%env SM_FRAMEWORK=tf.keras !pip install.. /input/segmentation-models-keras/Keras_Applications-1.0.8-py3-none-any.whl --quiet !pip install.. /input/segmentation-models-keras/image_classifiers-1.0.0-py3-none-any.whl --quiet !pip install.. /input/segmentation-models-keras/efficientnet-1.0.0-py3-none-any.whl --quiet !pip install.. /input/segmentation-models-keras/segmentation_models-1.0.1-py3-none-any.whl --quiet print("Segmentation Models installed." )<define_variables>
class Dataset(Dataset): def __init__(self, data): self.data = data self.n_samples = data.shape[0] self.x_data = torch.tensor(data.iloc[::].values, dtype=torch.long) def __getitem__(self, index): return self.x_data[index].reshape(28,28) def __len__(self): return self.n_samples def get_long_type(self, target): target = target.type(torch.LongTensor) return target
Digit Recognizer
17,862,039
DEBUG = False<import_modules>
input_size = 784 hidden_size = 100 num_classes = 10 num_epochs = 2 batch_size = 100 learning_rate = 0.001 train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=transforms.Compose( [transforms.ToTensor() , transforms.Normalize(( 0.1307,),(0.3081,)) ]), download=True) test_dataset = Dataset(test_df) train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
Digit Recognizer
17,862,039
print(tf.__version__ )<define_variables>
class CNNModel(nn.Module): def __init__(self): super(CNNModel, self ).__init__() self.cnn1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=0) self.relu1 = nn.ReLU() self.maxpool1 = nn.MaxPool2d(kernel_size=2) self.cnn2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=0) self.relu2 = nn.ReLU() self.maxpool2 = nn.MaxPool2d(kernel_size=2) self.fc1 = nn.Linear(32 * 4 * 4, 10) def forward(self, x): out = self.cnn1(x) out = self.relu1(out) out = self.maxpool1(out) out = self.cnn2(out) out = self.relu2(out) out = self.maxpool2(out) out = out.view(out.size(0), -1) out = self.fc1(out) return out n_total_steps = len(train_loader) batch_size = 100 num_epochs = 10 model = CNNModel() if torch.cuda.is_available() : model.cuda() criterion = nn.CrossEntropyLoss() learning_rate = 0.1 optimizer = torch.optim.SGD(model.parameters() , lr=learning_rate )
Digit Recognizer
17,862,039
data_dir = '.. /input/ranzcr-clip-catheter-line-classification' model_dir = '.. /input/ranzcr-1st-place-solution-by-tf-models' seg_image_size = 1024 cls_image_size = 512 batch_size = 16<load_from_csv>
for epoch in range(num_epochs): for i,(images, labels)in enumerate(train_loader): images = images.to(device) train = Variable(images.view(100,1,28,28)).to(device) labels = labels.to(device) outputs = model(train) loss = criterion(outputs, labels) optimizer.zero_grad() loss.backward() optimizer.step() if(i + 1)% 100 == 0: print(f'Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{n_total_steps}], Loss: {loss.item() :.4f}' )
Digit Recognizer
17,862,039
df_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) <define_variables>
def evalueate_model(model, test_loader): predictions = [] model.eval() for images in test_loader: images = images.to(device) train = Variable(images.view(100,1,28,28 ).type(torch.float32)).to(device) with torch.no_grad() : predicts = model(train) predicts = predicts.argmax(axis=1) predicts = predicts.cpu().numpy() for pred in predicts: predictions.append(pred) return(predictions )
Digit Recognizer
17,862,039
seg_model_names = [ 'seg_model_V10_0.hdf5' ] cls_model_names = [ 'cls_model_V14_0.hdf5', 'cls_model_V15_1.hdf5', 'cls_model_V15_2.hdf5', 'cls_model_V16_3.hdf5', 'cls_model_V16_4.hdf5' ]<sort_values>
pred = evalueate_model(model, test_loader )
Digit Recognizer
17,862,039
tfrec_path = data_dir + '/test_tfrecords/*.tfrec' tfrec_file_names = sorted(tf.io.gfile.glob(tfrec_path)) tfrec_file_names = \ [ tfrec_file_names[0] ] if DEBUG else tfrec_file_names tfrec_file_names<split>
submission = pd.read_csv('.. /input/digit-recognizer/sample_submission.csv') submission['Label'] = pred submission.head(25 )
Digit Recognizer
17,862,039
AUTOTUNE = tf.data.experimental.AUTOTUNE def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) return image def read_tfrecord(example): TFREC_FORMAT = { 'image': tf.io.FixedLenFeature([], tf.string), 'StudyInstanceUID': tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, TFREC_FORMAT) image = decode_image(example['image']) study_inst_id = example['StudyInstanceUID'] return image, study_inst_id def load_dataset(filenames): ds = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTOTUNE) ds = ds.map(read_tfrecord, num_parallel_calls=AUTOTUNE) return ds<create_dataframe>
submission.to_csv('submission.csv', index=False )
Digit Recognizer
17,862,039
raw_test_ds = load_dataset(tfrec_file_names) raw_test_ds<categorify>
submission.to_csv('submission.csv', index=False )
Digit Recognizer
17,850,788
study_inst_id_list = [ study_inst_id.numpy().decode('utf-8')for image, study_inst_id in raw_test_ds ] print(study_inst_id_list[ :10 ]) print(study_inst_id_list[ -10: ] )<prepare_x_and_y>
import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from keras.utils.np_utils import to_categorical from sklearn.model_selection import train_test_split
Digit Recognizer
17,850,788
def drop_study_inst_id(image, study_inst_id): return image def preprocess_image(image): image_seg = tf.image.resize(image,(seg_image_size, seg_image_size)) image_seg = image_seg / 255.0 image_cls = tf.image.resize(image,(cls_image_size, cls_image_size)) return(( image_seg, image_cls),) def make_test_dataset() : ds = load_dataset(tfrec_file_names) ds = ds.map(drop_study_inst_id, num_parallel_calls=AUTOTUNE) ds = ds.map(preprocess_image, num_parallel_calls=AUTOTUNE) ds = ds.batch(batch_size) ds = ds.prefetch(AUTOTUNE) return ds<create_dataframe>
train_df = pd.read_csv('.. /input/digit-recognizer/train.csv') test_df = pd.read_csv('.. /input/digit-recognizer/test.csv') train_df.head() g = sns.countplot(train_df['label']) train_df['label'].value_counts()
Digit Recognizer
17,850,788
test_ds = make_test_dataset() test_ds<init_hyperparams>
X_train = train_df.drop(['label'],1) Y_train = train_df['label'] X_train = X_train/255.0 test = test_df/255.0 X_train = X_train.values.reshape(-1,28,28,1) test = test.values.reshape(-1,28,28,1) Y_train = to_categorical(Y_train,num_classes=10 )
Digit Recognizer
17,850,788
def load_model(weight_file_name): weight_file_path = os.path.join(model_dir, weight_file_name) model = tf.keras.models.load_model(weight_file_path) return model def make_seg_masks(x): fold_seg_masks = tf.stack(x, axis=0) average_seg_masks = \ tf.math.reduce_mean(fold_seg_masks, axis=0) return average_seg_masks def make_cls_inputs(x): cls_images = x[0] seg_masks = x[1] seg_masks = tf.image.resize( seg_masks,(cls_image_size, cls_image_size)) seg_masks = seg_masks * 255.0 cls_inputs = tf.concat([cls_images, seg_masks], axis=-1) return cls_inputs def make_model(cls_model_name): seg_images = tf.keras.Input( shape=(seg_image_size, seg_image_size, 3), name="seg_images") seg_outputs = [] for seg_model_name in seg_model_names: seg_model = load_model(seg_model_name) seg_output = seg_model(seg_images) seg_outputs.append(seg_output) seg_masks = L.Lambda( make_seg_masks, name="seg_masks" )(seg_outputs) cls_images = tf.keras.Input( shape=(cls_image_size, cls_image_size, 3), name="cls_images") cls_inputs = L.Lambda( make_cls_inputs, name="cls_inputs" )( [cls_images, seg_masks]) cls_model = load_model(cls_model_name) ett, others, pred = cls_model(cls_inputs) model = tf.keras.Model( inputs=[seg_images, cls_images], outputs=pred, name="infer_model") return model<find_best_params>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=2 )
Digit Recognizer
17,850,788
strategy = tf.distribute.get_strategy() print("REPLICAS: ", strategy.num_replicas_in_sync )<prepare_output>
from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential from keras.layers import BatchNormalization, Conv2D, Dense, Dropout, Flatten,MaxPooling2D from keras.callbacks import ReduceLROnPlateau from keras.applications import ResNet50
Digit Recognizer
17,850,788
df_sub['StudyInstanceUID'] = study_inst_id_list<define_variables>
datagen = ImageDataGenerator( rotation_range= 10, zoom_range= 0.2, width_shift_range=0.1, height_shift_range = 0.1, horizontal_flip = False, vertical_flip = False ) datagen.fit(X_train )
Digit Recognizer
17,850,788
target_cols = [ 'ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present' ]<feature_engineering>
model = Sequential([ Conv2D(64,(5,5),padding='same',input_shape=(28,28,1),activation='relu'), Conv2D(64,(5,5),padding='same',activation='relu'), MaxPooling2D(2,2), Dropout(0.2), Conv2D(64,(3,3),padding='same',activation='relu'), Conv2D(64,(3,3),padding='same',activation='relu'), MaxPooling2D(2,2), Dropout(0.5), Flatten() , Dense(128,activation='relu'), Dense(128,activation='relu'), Dropout(0.2), Dense(10,activation='softmax') ]) model.summary()
Digit Recognizer
17,850,788
df_subs = [df_sub.copy() for _ in range(PROBS.shape[0])] for i, this_sub in enumerate(df_subs): this_sub[target_cols] = PROBS[i] this_sub[target_cols] = \ this_sub[target_cols].rank(pct=True )<feature_engineering>
rms = RMSprop(lr=0.001,rho=0.9,epsilon=1e-08,decay=0.0) adam = Adam(lr=0.001 )
Digit Recognizer
17,850,788
rank_values = \ [this_sub[target_cols].values for this_sub in df_subs] df_sub[target_cols] = \ np.stack(rank_values, 0 ).mean(0 )<save_to_csv>
model.compile(optimizer=adam,loss='categorical_crossentropy',metrics='acc' )
Digit Recognizer
17,850,788
df_sub.to_csv('submission.csv', index=False) !head submission.csv<define_variables>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )
Digit Recognizer
17,850,788
batch_size = 1 image_size = 512 tta = True submit = True enet_type = ['resnet200d'] * 5 model_path = ['.. /input/resnet200d-baseline-benchmark-public/resnet200d_fold0_cv953.pth', '.. /input/resnet200d-baseline-benchmark-public/resnet200d_fold1_cv955.pth', '.. /input/resnet200d-baseline-benchmark-public/resnet200d_fold2_cv955.pth', '.. /input/resnet200d-baseline-benchmark-public/resnet200d_fold3_cv957.pth', '.. /input/resnet200d-baseline-benchmark-public/resnet200d_fold4_cv954.pth']<set_options>
history = model.fit( datagen.flow(X_train,Y_train, batch_size=32), epochs = 30, validation_data =(X_val,Y_val), verbose = 1, steps_per_epoch=X_train.shape[0] // 32, callbacks=[learning_rate_reduction] )
Digit Recognizer
17,850,788
sys.path.append('.. /input/pytorch-image-models/pytorch-image-models-master') sys.path.append('.. /input/timm-pytorch-image-models/pytorch-image-models-master') DEBUG = False %matplotlib inline device = torch.device('cuda')if not DEBUG else torch.device('cpu' )<choose_model_class>
results = model.predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label" )
Digit Recognizer
17,850,788
class RANZCRResNet200D(nn.Module): def __init__(self, model_name='resnet200d', out_dim=11, pretrained=False): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.fc.in_features self.model.global_pool = nn.Identity() self.model.fc = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(n_features, out_dim) def forward(self, x): bs = x.size(0) features = self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.fc(pooled_features) return output<concatenate>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("new3.csv",index=False )
Digit Recognizer
17,782,694
transforms_test = albumentations.Compose([ Resize(image_size, image_size), Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], ), ToTensorV2() ] )<load_from_csv>
%matplotlib inline
Digit Recognizer
17,782,694
test = pd.read_csv('.. /input/ranzcr-clip-catheter-line-classification/sample_submission.csv') test['file_path'] = test.StudyInstanceUID.apply(lambda x: os.path.join('.. /input/ranzcr-clip-catheter-line-classification/test', f'{x}.jpg')) target_cols = test.iloc[:, 1:12].columns.tolist() test_dataset = RANZCRDataset(test, 'test', transform=transforms_test) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=24 )<choose_model_class>
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') subs = pd.read_csv('.. /input/digit-recognizer/sample_submission.csv' )
Digit Recognizer
17,782,694
if submit: test_preds_1 = [] for i in range(len(enet_type)) : if enet_type[i] == 'resnet200d': print('resnet200d loaded') model = RANZCRResNet200D(enet_type[i], out_dim=len(target_cols)) model = model.to(device) model.load_state_dict(torch.load(model_path[i], map_location='cuda:0')) if tta: test_preds_1 += [tta_inference_func(test_loader)] else: test_preds_1 += [inference_func(test_loader)]<load_from_csv>
Y_train = train["label"] X_train = train.drop(labels = ["label"],axis = 1) X_train = X_train / 255.0 X_test = test / 255.0 X_train = X_train.values.reshape(-1,28,28,1) X_test = X_test.values.reshape(-1,28,28,1) Y_train = to_categorical(Y_train, num_classes = 10 )
Digit Recognizer
17,782,694
submission = pd.read_csv('.. /input/ranzcr-clip-catheter-line-classification/sample_submission.csv') submission[target_cols] = np.mean(test_preds_1, axis=0 )<set_options>
datagen = ImageDataGenerator( rotation_range=10, zoom_range = 0.10, width_shift_range=0.1, height_shift_range=0.1 )
Digit Recognizer
17,782,694
sys.path.append('.. /input/pytorch-images-seresnet') warnings.filterwarnings('ignore') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )<load_from_csv>
nets = 17 model = [0] *nets for j in range(nets): model[j] = Sequential() model[j].add(Conv2D(32, kernel_size = 3, activation='relu', input_shape =(28, 28, 1))) model[j].add(BatchNormalization()) model[j].add(Conv2D(32, kernel_size = 3, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='relu')) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Conv2D(64, kernel_size = 3, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(64, kernel_size = 3, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='relu')) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Conv2D(128, kernel_size = 4, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Flatten()) model[j].add(Dropout(0.4)) model[j].add(Dense(10, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
17,782,694
IMAGE_SIZE = 640 BATCH_SIZE = 128 TEST_PATH = '.. /input/ranzcr-clip-catheter-line-classification/test' MODEL_PATH = '.. /input/resnet200d-public/resnet200d_320_CV9632.pth' test = pd.read_csv('.. /input/ranzcr-clip-catheter-line-classification/sample_submission.csv' )<categorify>
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x )
Digit Recognizer
17,782,694
def get_transforms() : return Compose([ Resize(IMAGE_SIZE, IMAGE_SIZE), Normalize( ), ToTensorV2() , ] )<choose_model_class>
history = [0] * nets epochs = 55 for j in range(nets): X_train2, X_val2, Y_train2, Y_val2 = train_test_split(X_train, Y_train, test_size = 0.1) history[j] = model[j].fit(datagen.flow(X_train2,Y_train2, batch_size=64), epochs = epochs, steps_per_epoch = X_train2.shape[0]//64, validation_data =(X_val2,Y_val2), callbacks=[annealer], verbose=0) print("CNN {0:d}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format( j+1,epochs,max(history[j].history['accuracy']),max(history[j].history['val_accuracy'])) )
Digit Recognizer
17,782,694
class ResNet200D(nn.Module): def __init__(self, model_name='resnet200d_320'): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.fc.in_features self.model.global_pool = nn.Identity() self.model.fc = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(n_features, 11) def forward(self, x): bs = x.size(0) features = self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.fc(pooled_features) return output<categorify>
results = np.zeros(( X_test.shape[0],10)) for j in range(nets): results = results + model[j].predict(X_test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("Submission_Data_Aug.csv",index=False )
Digit Recognizer
17,531,813
def inference(models, test_loader, device): tk0 = tqdm(enumerate(test_loader), total=len(test_loader)) probs = [] for i,(images)in tk0: images = images.to(device) avg_preds = [] for model in models: with torch.no_grad() : y_preds1 = model(images) y_preds2 = model(images.flip(-1)) y_preds =(y_preds1.sigmoid().to('cpu' ).numpy() + y_preds2.sigmoid().to('cpu' ).numpy())/ 2 avg_preds.append(y_preds) avg_preds = np.mean(avg_preds, axis=0) probs.append(avg_preds) probs = np.concatenate(probs) return probs<choose_model_class>
class my_params: seed = 1 batch_size = 256 test_size = 0.1
Digit Recognizer
17,531,813
model = ResNet200D() model.load_state_dict(torch.load(MODEL_PATH)['model']) model.eval() models = [model.to(device)]<load_pretrained>
train_pd = pd.read_csv(".. /input/digit-recognizer/train.csv", dtype=np.float32) final_test = pd.read_csv(".. /input/digit-recognizer/test.csv", dtype=np.float32) sample_sub = pd.read_csv(".. /input/digit-recognizer/sample_submission.csv") train_pd.info()
Digit Recognizer
17,531,813
test_dataset = TestDataset(test, transform=get_transforms()) test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4 , pin_memory=True) predictions = inference(models, test_loader, device )<feature_engineering>
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device )
Digit Recognizer
17,531,813
test[target_cols] = predictions test[target_cols].head(5 )<load_from_csv>
labels_np = train_pd.label.values features_np = train_pd.loc[:, train_pd.columns != 'label'].values/255 features_train, features_val, labels_train, labels_val = train_test_split(features_np, labels_np, test_size = my_params.test_size, random_state = my_params.seed )
Digit Recognizer
17,531,813
Final_Submission = pd.read_csv('.. /input/ranzcr-clip-catheter-line-classification/sample_submission.csv') Final_Submission[target_cols] =(test[target_cols]**0.5 + submission[target_cols]**0.5)/2<save_to_csv>
featuresTrain = torch.from_numpy(features_train) labelsTrain = torch.from_numpy(labels_train ).type(torch.LongTensor) featuresVal = torch.from_numpy(features_val) labelsVal = torch.from_numpy(labels_val ).type(torch.LongTensor )
Digit Recognizer
17,531,813
Final_Submission.to_csv("submission.csv", index=False )<define_variables>
train_transforms = transforms.Compose([transforms.ToPILImage() , transforms.RandomRotation(degrees=(-10, 10)) , transforms.RandomAffine(0, translate=(0.1,0.1)) , transforms.ToTensor() ] )
Digit Recognizer
17,531,813
sys.path = [ '.. /input/smp20210127/segmentation_models.pytorch-master/segmentation_models.pytorch-master/', '.. /input/smp20210127/EfficientNet-PyTorch-master/EfficientNet-PyTorch-master', '.. /input/smp20210127/pytorch-image-models-master/pytorch-image-models-master', '.. /input/smp20210127/pretrained-models.pytorch-master/pretrained-models.pytorch-master', ] + sys.path<set_options>
class custom_mnist(Dataset): def __init__(self, feat_tens, label_tens, transform=None): self.data = feat_tens.reshape(len(feat_tens), 1, 28, 28) self.label_data = label_tens self.transform = transform def __len__(self): return len(self.data) def __getitem__(self, index): image = self.data[index] label = self.label_data[index] if self.transform is not None: image = self.transform(image) return image, label
Digit Recognizer
17,531,813
%matplotlib inline device = torch.device('cuda')if torch.cuda.is_available() else torch.device('cpu' )<define_variables>
train_dataset = custom_mnist(featuresTrain, labelsTrain) val_dataset = custom_mnist(featuresVal, labelsVal) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size = my_params.batch_size, shuffle = True) val_loader = torch.utils.data.DataLoader(val_dataset) print("train_loader: {}".format(len(train_loader)) , " val_loader: {}".format(len(val_loader)) )
Digit Recognizer
17,531,813
data_dir = '.. /input/ranzcr-clip-catheter-line-classification' model_dir = '.. /input/ranzcr-public-model-qishen' num_workers = 2 image_size = 512 batch_size = 8<load_from_csv>
img, lab = train_dataset.__getitem__(3) print("image_shape: ", img.shape) print("image_label: ", lab) plt.imshow(np.squeeze(img))
Digit Recognizer
17,531,813
df_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) df_sub = df_sub.iloc[:358] if df_sub.shape[0] == 3582 else df_sub<data_type_conversions>
class NeuralNetwork(nn.Module):
    """CNN for 28x28 single-channel MNIST digits.

    Two conv blocks (conv-conv-BN with max-pool and dropout) followed by a
    fully connected head.  ``forward`` returns raw logits of shape (N, 10).

    Fix vs. the original: the trailing ``nn.ReLU()`` + ``nn.Softmax(dim=1)``
    were removed.  The training loop uses ``nn.CrossEntropyLoss``, which
    applies log-softmax internally, so feeding it ReLU-clipped softmax
    probabilities both double-normalises and destroys gradients for negative
    logits.  Downstream code only takes ``topk``/argmax of (exp'd) outputs,
    which is monotonic, so predicted classes are unaffected by this change.
    """

    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.features = torch.nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=32),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=32),
            nn.MaxPool2d(kernel_size=(2, 2), stride=2),
            nn.Dropout(0.25),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=64),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=64),
            nn.MaxPool2d(kernel_size=(2, 2), stride=2),
            nn.Dropout(0.25),
            nn.Flatten(),
            # 64 channels * 7 * 7 spatial = 3136 features after two pools.
            nn.Linear(in_features=3136, out_features=512),
            nn.BatchNorm1d(num_features=512),
            nn.Dropout(0.25),
            nn.Linear(in_features=512, out_features=1024),
            nn.BatchNorm1d(num_features=1024),
            nn.Dropout(0.50),
            nn.Linear(in_features=1024, out_features=10),  # raw logits
        )

    def forward(self, x):
        # (N, 1, 28, 28) -> (N, 10) logits.
        logits = self.features(x)
        return logits


NeuralNetwork()
Digit Recognizer
17,531,813
class RANZCRDatasetTest(Dataset): def __init__(self, df): self.df = df.reset_index(drop=True) def __len__(self): return self.df.shape[0] def __getitem__(self, index): row = self.df.iloc[index] image = cv2.imread(os.path.join(data_dir, 'test', row.StudyInstanceUID + '.jpg')) [:, :, ::-1] image1024 = cv2.resize(image ,(1024, 1024)).astype(np.float32 ).transpose(2, 0, 1)/ 255. image512 = cv2.resize(image ,(512, 512)).astype(np.float32 ).transpose(2, 0, 1)/ 255. return { '1024': torch.tensor(image1024), '512': torch.tensor(image512), } dataset_test = RANZCRDatasetTest(df_sub) test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=batch_size, shuffle=False, num_workers=num_workers )<categorify>
# Loss/optimizer setup and bookkeeping lists for the training loop.
# CrossEntropyLoss expects raw logits from the model.
# NOTE(review): weight_decay=0.8 is extremely strong for Adamax (typical
# values are <= 1e-2) -- confirm this is intentional.
model = NeuralNetwork() criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adamax(model.parameters() , lr=0.0001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.8) epochs = 40 steps = 0 train_losses, val_losses, val_accs = [], [], [] early_stop_count = 0
Digit Recognizer
17,531,813
class SegModel(nn.Module): def __init__(self, backbone): super(SegModel, self ).__init__() self.seg = smp.UnetPlusPlus(encoder_name=backbone, encoder_weights=None, classes=2, activation=None) def forward(self,x): global_features = self.seg.encoder(x) seg_features = self.seg.decoder(*global_features) seg_features = self.seg.segmentation_head(seg_features) return seg_features class enetv2(nn.Module): def __init__(self, backbone): super(enetv2, self ).__init__() self.enet = timm.create_model(backbone, False) self.enet.conv_stem.weight = nn.Parameter(self.enet.conv_stem.weight.repeat(1,5//3+1,1,1)[:, :5]) self.myfc = nn.Linear(self.enet.classifier.in_features, 12) self.enet.classifier = nn.Identity() def extract(self, x): return self.enet(x) def forward(self, x, mask): mask = F.interpolate(mask, x.shape[2]) x = torch.cat([x, mask], 1) x = self.extract(x) x = self.myfc(x) return x<define_variables>
# Move the model and loss to the GPU when one is available.
if torch.cuda.is_available() : model = model.cuda() criterion = criterion.cuda() print("GPU Training") else: print("CPU Training" )
Digit Recognizer
17,531,813
# Backbone/checkpoint identifiers for the segmentation and classification
# ensembles loaded below.
enet_type_seg = 'timm-efficientnet-b1' kernel_type_seg = 'unetb1_2cbce_1024T15tip_lr1e4_bs4_augv2_30epo' enet_type_cls = 'tf_efficientnet_b1_ns' kernel_type_cls = 'enetb1_5ch_512_lr3e4_bs32_30epo'
# Training loop: one optimizer pass per epoch, then validation, best-weights
# tracking (best_model_params) and early stopping after 10 epochs without a
# new best validation accuracy.
# NOTE(review): train accuracy is recomputed with a full extra pass over
# train_loader every epoch, roughly doubling epoch time; early_stop_count is
# also incremented unconditionally after the print, so a "best" epoch ends
# the iteration at 1 rather than 0 -- confirm both are intentional.
print("---- Starting Model Training ---- ") for e in range(epochs): if early_stop_count >= 10: print(" Validation Accuracy not improved for {} epochs.val_acc: {}.".format(early_stop_count, sorted(val_accs)[-1])) break print(" -- Epoch {}/{} -- ".format(e+1, epochs)) running_loss = 0 steps = 0 for images, labels in train_loader: if torch.cuda.is_available() : images = images.to(device) labels = labels.to(device) steps += 1 optimizer.zero_grad() log_ps = model(images) loss = criterion(log_ps, labels) loss.backward() optimizer.step() running_loss += loss.item() if steps % len(train_loader)== 0: val_loss = 0 val_accuracy = 0 train_accuracy = 0 with torch.no_grad() : model.eval() for images, labels in val_loader: if torch.cuda.is_available() : images = images.to(device) labels = labels.to(device) log_ps = model(images) val_loss += criterion(log_ps, labels) ps = torch.exp(log_ps) top_p, top_class = ps.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) val_accuracy += torch.mean(equals.type(torch.FloatTensor)) for images, labels in train_loader: if torch.cuda.is_available() : images = images.to(device) labels = labels.to(device) log_ps = model(images) ps = torch.exp(log_ps) top_p, top_class = ps.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) train_accuracy += torch.mean(equals.type(torch.FloatTensor)) model.train() train_losses.append(running_loss/len(train_loader)) val_losses.append(val_loss/len(val_loader)) val_accs.append(val_accuracy/len(val_loader)) if val_accs[-1] == sorted(val_accs)[-1]: best_model_params = model.state_dict() early_stop_count = 0 print("+") print("Training Loss: {:.4f} ".format(train_losses[-1]), "Training Acc: {:.4f}".format(train_accuracy/len(train_loader)) , "Valid Loss: {:.4f} ".format(val_losses[-1]), "Valid Accuracy: {:.4f}".format(val_accs[-1])) early_stop_count += 1 if e ==(epochs-1): print(" Top Validation Accuracy: {}".format(sorted(val_accs)[-1]))
Digit Recognizer
17,531,813
models_seg = [] for fold in range(5): model = SegModel(enet_type_seg) model = model.to(device) model_file = os.path.join(model_dir, f'{kernel_type_seg}_best_fold{fold}.pth') model.load_state_dict(torch.load(model_file), strict=True) model.eval() models_seg.append(model) models_cls = [] for fold in range(5): model = enetv2(enet_type_cls) model = model.to(device) model_file = os.path.join(model_dir, f'{kernel_type_cls}_best_fold{fold}.pth') model.load_state_dict(torch.load(model_file), strict=True) model.eval() models_cls.append(model )<create_dataframe>
# Restore the weights from the best validation-accuracy epoch before inference.
model.load_state_dict(best_model_params )
Digit Recognizer
17,531,813
df_subs = [df_sub.copy() for _ in range(PROBS.shape[0])] for i, this_sub in enumerate(df_subs): this_sub[[ 'ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present' ]] = PROBS[i] this_sub[[ 'ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present' ]] = this_sub[[ 'ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present' ]].rank(pct=True) df_sub[[ 'ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present' ]] = np.stack([this_sub[[ 'ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present' ]].values for this_sub in df_subs], 0 ).mean(0) df_sub.to_csv('submission.csv', index=False )<set_options>
# Normalise test pixels to [0, 1] and convert to tensors.  test_labels is a
# zero placeholder the same shape as the features; it is never used by the
# inference dataset below.
final_test_np = final_test.values/255 test_labels = np.zeros(final_test_np.shape) test_tn = torch.from_numpy(final_test_np) test_labels = torch.from_numpy(test_labels )
Digit Recognizer
17,531,813
# Offline seresnet package path, warning suppression and device selection.
sys.path.append('.. /input/pytorch-images-seresnet') warnings.filterwarnings('ignore') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
# Unlabelled inference dataset: reshapes flat 784-pixel rows to (1, 28, 28)
# images, served in fixed order (shuffle=False) so ImageIds stay aligned.
class test_mnist(Dataset): def __init__(self, feat_tens): self.data = feat_tens.reshape(len(feat_tens), 1, 28, 28) def __len__(self): return len(self.data) def __getitem__(self, index): image = self.data[index] return image submission_dataset = test_mnist(test_tn) submission_loader = torch.utils.data.DataLoader(submission_dataset, batch_size = 256, shuffle = False )
Digit Recognizer
17,531,813
<load_from_csv><EOS>
# Predict in batches and build the Kaggle submission (ImageId, Label).
# The header row is carried inside the list, promoted to column names, and
# then dropped before writing submission.csv.
submission = [['ImageId', 'Label']] with torch.no_grad() : model.eval() image_id = 1 for images in submission_loader: if torch.cuda.is_available() : images = images.to(device) log_ps = model(images) ps = torch.exp(log_ps) top_p, top_class = ps.topk(1, dim=1) for prediction in top_class: submission.append([image_id, prediction.item() ]) image_id += 1 submission_df = pd.DataFrame(submission) submission_df.columns = submission_df.iloc[0] submission_df = submission_df.drop(0, axis=0) submission_df.to_csv("submission.csv", index=False )
Digit Recognizer
17,484,229
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<categorify>
# Load the Kaggle digit-recognizer train/test splits.
train_set = pd.read_csv('.. /input/digit-recognizer/train.csv') test_set = pd.read_csv('.. /input/digit-recognizer/test.csv' )
Digit Recognizer
17,484,229
def get_transforms():
    """Build the test-time preprocessing pipeline.

    Resizes to IMAGE_SIZE x IMAGE_SIZE, applies default normalisation, and
    converts the result to a tensor.
    """
    steps = [
        Resize(IMAGE_SIZE, IMAGE_SIZE),
        Normalize(),
        ToTensorV2(),
    ]
    return Compose(steps)
# Separate the pixel features from the target label column.
X = train_set.drop('label', axis=1) labels = train_set['label']
Digit Recognizer
17,484,229
class ResNet200D(nn.Module): def __init__(self, model_name='resnet200d_320'): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.fc.in_features self.model.global_pool = nn.Identity() self.model.fc = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(n_features, 11) def forward(self, x): bs = x.size(0) features = self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.fc(pooled_features) return output class SeResNet152D(nn.Module): def __init__(self, model_name='seresnet152d_320'): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.fc.in_features self.model.global_pool = nn.Identity() self.model.fc = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(n_features, 11) def forward(self, x): bs = x.size(0) features = self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.fc(pooled_features) return output class EfficientNetB5(nn.Module): def __init__(self, model_name='tf_efficientnet_b5_ns'): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.classifier.in_features self.model.global_pool = nn.Identity() self.model.classifier = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.classifier = nn.Linear(n_features, 11) def forward(self, x): bs = x.size(0) features = self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.classifier(pooled_features) return output<categorify>
# One-hot encode the 10 digit classes for categorical_crossentropy.
y = pd.get_dummies(labels )
Digit Recognizer
17,484,229
def inference(models, test_loader, device): tk0 = tqdm(enumerate(test_loader), total=len(test_loader)) probs = [] for i,(images)in tk0: images = images.to(device) avg_preds = [] for model in models: with torch.no_grad() : y_preds1 = model(images) y_preds2 = model(images.flip(-1)) y_preds =(y_preds1.sigmoid().to('cpu' ).numpy() + y_preds2.sigmoid().to('cpu' ).numpy())/ 2 avg_preds.append(y_preds) avg_preds = np.mean(avg_preds, axis=0) probs.append(avg_preds) probs = np.concatenate(probs) return probs<choose_model_class>
# Scale pixel intensities from [0, 255] to [0, 1].
X = X / 255.0 test_set = test_set / 255.0
Digit Recognizer
17,484,229
models200D = [] model = ResNet200D() model.load_state_dict(torch.load(MODEL_PATH_resnet200d)['model']) model.eval() model.to(device) models200D.append(model) models152D = [] model = SeResNet152D() model.load_state_dict(torch.load(MODEL_PATH_seresnet152d)['model']) model.eval() model.to(device) models152D.append(model) modelsB5 = [] model = EfficientNetB5() model.load_state_dict(torch.load(MODEL_PATH_efficientnet_b5)['model']) model.eval() model.to(device) modelsB5.append(model )<create_dataframe>
# Hold out 20% of the data for validation (fixed seed for reproducibility).
X_train,X_val, y_train, y_val = train_test_split(X,y,test_size=0.2, random_state=42 )
Digit Recognizer
17,484,229
test_dataset = TestDataset(test, transform=get_transforms()) test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4 , pin_memory=True) predictions200d = inference(models200D, test_loader, device) del models200D gc.collect() predictions152d = inference(models152D, test_loader, device) del models152D gc.collect() predictionsB5 = inference(modelsB5, test_loader, device) <drop_column>
# Shared training hyperparameters for the Keras models below.
batch_size = 32 epochs=50
Digit Recognizer
17,484,229
variable_list = %who_ls for _ in variable_list: if _ not in("predictions200d", "predictions152d", "predictionsB5"): del globals() [_] %who_ls<define_variables>
# Baseline dense network.  NOTE(review): the first Dense(100) is applied to
# the (28, 28, 1) input before Flatten, so it operates per-row on the last
# axis rather than on a flattened image -- likely unintended, confirm.
simple_NN = keras.Sequential([ layers.Dense(100, activation='relu', input_shape=(28,28,1)) , layers.Dropout(0.3), layers.Flatten() , layers.Dense(units=100, activation='relu'), layers.Dense(units=10, activation='softmax') ])
Digit Recognizer
17,484,229
ROOT = Path.cwd().parent INPUT = ROOT / "input" OUTPUT = ROOT / "output" DATA = INPUT / "ranzcr-clip-catheter-line-classification" TRAIN = DATA / "train" TEST = DATA / "test" TRAINED_MODEL = INPUT / "ranzcr-clip-weights-for-multi-head-model-v2" TMP = ROOT / "tmp" TMP.mkdir(exist_ok=True) RANDAM_SEED = 1086 N_CLASSES = 11 FOLDS = [0, 1, 2, 3, 4] N_FOLD = len(FOLDS) IMAGE_SIZE =(512, 512) FAST_COMMIT = False CLASSES = [ 'ETT - Abnormal', 'ETT - Borderline', 'ETT - Normal', 'NGT - Abnormal', 'NGT - Borderline', 'NGT - Incompletely Imaged', 'NGT - Normal', 'CVC - Abnormal', 'CVC - Borderline', 'CVC - Normal', 'Swan Ganz Catheter Present' ]<load_from_csv>
# Compile the baseline model.  NOTE(review): assigning optimizer.lr after
# compile sets Adam's rate to 0.001, which is already its default -- a no-op.
simple_NN.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] ) simple_NN.optimizer.lr=0.001
Digit Recognizer
17,484,229
for p in DATA.iterdir() : print(p.name) train = pd.read_csv(DATA / "train.csv") smpl_sub = pd.read_csv(DATA / "sample_submission.csv") smpl_sub.shape<split>
# Stop after 3 epochs without val_loss improvement, restoring best weights.
early_stopping = EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)
Digit Recognizer
17,484,229
# Fast-commit mode: shrink the public test set to 3 batches (64 * 3 rows).
if FAST_COMMIT and len(smpl_sub)== 3582: smpl_sub = smpl_sub.iloc[:64 * 3].reset_index(drop=True )
# Halve the learning rate when val_loss plateaus for 3 epochs (floor 1e-5).
lrr = ReduceLROnPlateau(monitor='val_loss',patience=3,verbose=1,factor=0.5, min_lr=0.00001 )
Digit Recognizer
17,484,229
def multi_label_stratified_group_k_fold(label_arr: np.array, gid_arr: np.array, n_fold: int, seed: int=42): np.random.seed(seed) random.seed(seed) start_time = time.time() n_train, n_class = label_arr.shape gid_unique = sorted(set(gid_arr)) n_group = len(gid_unique) gid2aid = dict(zip(gid_unique, range(n_group))) aid_arr = np.vectorize(lambda x: gid2aid[x] )(gid_arr) cnts_by_class = label_arr.sum(axis=0) col, row = np.array(sorted(enumerate(aid_arr), key=lambda x: x[1])).T cnts_by_group = coo_matrix( (np.ones(len(label_arr)) ,(row, col)) ).dot(coo_matrix(label_arr)).toarray().astype(int) del col del row cnts_by_fold = np.zeros(( n_fold, n_class), int) groups_by_fold = [[] for fid in range(n_fold)] group_and_cnts = list(enumerate(cnts_by_group)) np.random.shuffle(group_and_cnts) print("finished preparation", time.time() - start_time) for aid, cnt_by_g in sorted(group_and_cnts, key=lambda x: -np.std(x[1])) : best_fold = None min_eval = None for fid in range(n_fold): cnts_by_fold[fid] += cnt_by_g fold_eval =(cnts_by_fold / cnts_by_class ).std(axis=0 ).mean() cnts_by_fold[fid] -= cnt_by_g if min_eval is None or fold_eval < min_eval: min_eval = fold_eval best_fold = fid cnts_by_fold[best_fold] += cnt_by_g groups_by_fold[best_fold].append(aid) print("finished assignment.", time.time() - start_time) gc.collect() idx_arr = np.arange(n_train) for fid in range(n_fold): val_groups = groups_by_fold[fid] val_indexs_bool = np.isin(aid_arr, val_groups) train_indexs = idx_arr[~val_indexs_bool] val_indexs = idx_arr[val_indexs_bool] print("[fold {}]".format(fid), end=" ") print("n_group:(train, val)=({}, {})".format(n_group - len(val_groups), len(val_groups)) , end=" ") print("n_sample:(train, val)=({}, {})".format(len(train_indexs), len(val_indexs))) yield train_indexs, val_indexs<categorify>
# Train the baseline dense model with early stopping and LR reduction.
history_simple_NN = simple_NN.fit( x=X_train, y=y_train, validation_data=(X_val, y_val), batch_size=batch_size, epochs=epochs, shuffle=True, verbose=2, callbacks=[early_stopping, lrr] )
Digit Recognizer
17,484,229
label_arr = train[CLASSES].values group_id = train.PatientID.values train_val_indexs = list( multi_label_stratified_group_k_fold( label_arr, group_id, N_FOLD, RANDAM_SEED ) )<feature_engineering>
# All-convolutional model with no pooling or dropout; Flatten therefore
# feeds 28*28*64 features straight into the softmax layer.
CNN = keras.Sequential([ layers.Conv2D(32, kernel_size=(3,3), activation='relu', padding='same', input_shape=(28,28,1)) , layers.Conv2D(64, kernel_size=(3,3), activation='relu', padding='same'), layers.Conv2D(64, kernel_size=(3,3), activation='relu', padding='same'), layers.Flatten() , layers.Dense(10, activation='softmax') ])
Digit Recognizer
17,484,229
train["fold"] = -1 for fold_id,(trn_idx, val_idx)in enumerate(train_val_indexs): train.loc[val_idx, "fold"] = fold_id train.groupby("fold")[CLASSES].sum()<train_model>
# Compile the first CNN (same optimizer/loss as the baseline model).
CNN.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] ) CNN.optimizer.lr=0.001
Digit Recognizer
17,484,229
def resize_images(img_id, input_dir, output_dir, resize_to=(512, 512), ext="png"): img_path = input_dir / f"{img_id}.jpg" save_path = output_dir / f"{img_id}.{ext}" img = cv2.imread(str(img_path), cv2.IMREAD_GRAYSCALE) img = cv2.resize(img, resize_to) cv2.imwrite(str(save_path), img,) TEST_RESIZED = TMP / "test_{0}x{1}".format(*IMAGE_SIZE) TEST_RESIZED.mkdir(exist_ok=True) TEST_RESIZED _ = Parallel(n_jobs=2, verbose=5 )([ delayed(resize_images )(img_id, TEST, TEST_RESIZED, IMAGE_SIZE, "png") for img_id in smpl_sub.StudyInstanceUID.values ] )<choose_model_class>
# Train the first CNN with the shared callbacks.
history_CNN = CNN.fit( x=X_train, y=y_train, validation_data=(X_val, y_val), batch_size=batch_size, epochs=epochs, shuffle= True, verbose=2, callbacks=[early_stopping, lrr] )
Digit Recognizer
17,484,229
def get_activation(activ_name: str="relu"): act_dict = { "relu": nn.ReLU(inplace=True), "tanh": nn.Tanh() , "sigmoid": nn.Sigmoid() , "identity": nn.Identity() } if activ_name in act_dict: return act_dict[activ_name] else: raise NotImplementedError class Conv2dBNActiv(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size: int, stride: int=1, padding: int=0, bias: bool=False, use_bn: bool=True, activ: str="relu" ): super(Conv2dBNActiv, self ).__init__() layers = [] layers.append(nn.Conv2d( in_channels, out_channels, kernel_size, stride, padding, bias=bias)) if use_bn: layers.append(nn.BatchNorm2d(out_channels)) layers.append(get_activation(activ)) self.layers = nn.Sequential(*layers) def forward(self, x): return self.layers(x) class SpatialAttentionBlock(nn.Module): def __init__( self, in_channels: int, out_channels_list: tp.List[int], ): super(SpatialAttentionBlock, self ).__init__() self.n_layers = len(out_channels_list) channels_list = [in_channels] + out_channels_list assert self.n_layers > 0 assert channels_list[-1] == 1 for i in range(self.n_layers - 1): in_chs, out_chs = channels_list[i: i + 2] layer = Conv2dBNActiv(in_chs, out_chs, 3, 1, 1, activ="relu") setattr(self, f"conv{i + 1}", layer) in_chs, out_chs = channels_list[-2:] layer = Conv2dBNActiv(in_chs, out_chs, 3, 1, 1, activ="sigmoid") setattr(self, f"conv{self.n_layers}", layer) def forward(self, x): h = x for i in range(self.n_layers): h = getattr(self, f"conv{i + 1}" )(h) h = h * x return h<choose_model_class>
# Deeper CNN with max-pooling after each conv layer and dropout before the
# softmax head; this is the model used for the final test predictions.
CNN_2 = keras.Sequential([ layers.Conv2D(32, kernel_size=(3,3), activation='relu', padding='same', input_shape=(28,28,1)) , layers.MaxPooling2D(pool_size=(2, 2)) , layers.Conv2D(64, kernel_size=(3,3), activation='relu', padding='same'), layers.MaxPooling2D(pool_size=(2, 2)) , layers.Conv2D(64, kernel_size=(3,3), activation='relu', padding='same'), layers.MaxPooling2D(pool_size=(2, 2)) , layers.Flatten() , layers.Dropout(0.4), layers.Dense(10, activation='softmax') ] )
Digit Recognizer
17,484,229
class MultiHeadResNet200D(nn.Module): def __init__( self, out_dims_head: tp.List[int]=[3, 4, 3, 1], pretrained=False ): self.base_name = "resnet200d_320" self.n_heads = len(out_dims_head) super(MultiHeadResNet200D, self ).__init__() base_model = timm.create_model( self.base_name, num_classes=sum(out_dims_head), pretrained=False) in_features = base_model.num_features if pretrained: pretrained_model_path = '.. /input/startingpointschestx/resnet200d_320_chestx.pth' state_dict = dict() for k, v in torch.load(pretrained_model_path, map_location='cpu')["model"].items() : if k[:6] == "model.": k = k.replace("model.", "") state_dict[k] = v base_model.load_state_dict(state_dict) base_model.reset_classifier(0, '') self.backbone = base_model for i, out_dim in enumerate(out_dims_head): layer_name = f"head_{i}" layer = nn.Sequential( SpatialAttentionBlock(in_features, [64, 32, 16, 1]), nn.AdaptiveAvgPool2d(output_size=1), nn.Flatten(start_dim=1), nn.Linear(in_features, in_features), nn.ReLU(inplace=True), nn.Dropout(0.5), nn.Linear(in_features, out_dim)) setattr(self, layer_name, layer) def forward(self, x): h = self.backbone(x) hs = [ getattr(self, f"head_{i}" )(h)for i in range(self.n_heads)] y = torch.cat(hs, axis=1) return y m = MultiHeadResNet200D([3, 4, 3, 1], False) m = m.eval() x = torch.rand(1, 3, 256, 256) with torch.no_grad() : y = m(x) print("[forward test]") print("input:\t{} output:\t{}".format(x.shape, y.shape)) del m; del x; del y gc.collect()<categorify>
# Compile the pooled CNN.
CNN_2.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
# Fix: the original set CNN.optimizer.lr here -- a copy/paste slip from the
# previous cell.  The learning rate belongs to CNN_2, the model just
# compiled (0.001 is also Adam's default, so this is effectively a no-op).
CNN_2.optimizer.lr = 0.001
Digit Recognizer
17,484,229
class LabeledImageDataset(data.Dataset): def __init__( self, file_list: tp.List[ tp.Tuple[tp.Union[str, Path], tp.Union[int, float, np.ndarray]] ], transform_list: tp.List[tp.Dict], ): self.file_list = file_list self.transform = ImageTransformForCls(transform_list) def __len__(self): return len(self.file_list) def __getitem__(self, index): img_path, label = self.file_list[index] img = self._read_image_as_array(img_path) img, label = self.transform(( img, label)) return img, label def _read_image_as_array(self, path: str): img_arr = cv2.imread(str(path)) img_arr = cv2.cvtColor(img_arr, cv2.COLOR_BGR2RGB) return img_arr<create_dataframe>
# Train CNN_2 on the raw (non-augmented) training data.
history_CNN_2 = CNN_2.fit( x=X_train, y=y_train, validation_data=(X_val, y_val), batch_size=batch_size, epochs=epochs, shuffle= True, verbose=2, callbacks=[early_stopping, lrr] )
Digit Recognizer
17,484,229
def get_dataloaders_for_inference( file_list: tp.List[tp.List], batch_size=64 ): dataset = LabeledImageDataset( file_list, transform_list=[ [ "Normalize", { "always_apply": True, "max_pixel_value": 255.0, "mean": ["0.4887381077884414"], "std": ["0.23064819430546407"] } ], ["ToTensorV2", {"always_apply": True}], ] ) loader = data.DataLoader( dataset, batch_size=batch_size, shuffle=False, num_workers=2, pin_memory=True, drop_last=False ) return loader<categorify>
# Augmentation generator: small rotations, zooms and shifts.  Flips are
# deliberately disabled -- mirroring changes digit identity (e.g. 2 vs 5).
# NOTE(review): validation_split here is unused by the flow() call below.
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False, validation_split=0.2 ) datagen.fit(X_train)
Digit Recognizer
17,484,229
class ImageTransformBase: def __init__(self, data_augmentations: tp.List[tp.Tuple[str, tp.Dict]]): augmentations_list = [ self._get_augmentation(aug_name )(**params) for aug_name, params in data_augmentations] self.data_aug = albumentations.Compose(augmentations_list) def __call__(self, pair: tp.Tuple[np.ndarray])-> tp.Tuple[np.ndarray]: raise NotImplementedError def _get_augmentation(self, aug_name: str)-> tp.Tuple[ImageOnlyTransform, DualTransform]: if hasattr(albumentations, aug_name): return getattr(albumentations, aug_name) else: return eval(aug_name) class ImageTransformForCls(ImageTransformBase): def __init__(self, data_augmentations: tp.List[tp.Tuple[str, tp.Dict]]): super(ImageTransformForCls, self ).__init__(data_augmentations) def __call__(self, in_arrs: tp.Tuple[np.ndarray])-> tp.Tuple[np.ndarray]: img, label = in_arrs augmented = self.data_aug(image=img) img = augmented["image"] return img, label<init_hyperparams>
# Number of augmented batches that covers roughly one pass over the data.
data_size = len(X_train) steps_per_epoch = int(data_size / batch_size) print(steps_per_epoch )
Digit Recognizer
17,484,229
def load_setting_file(path: str): with open(path)as f: settings = yaml.safe_load(f) return settings def set_random_seed(seed: int = 42, deterministic: bool = False): random.seed(seed) np.random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = deterministic def run_inference_loop(stgs, model, loader, device): model.to(device) model.eval() pred_list = [] with torch.no_grad() : for x, t in tqdm(loader): y_1 = model(x.to(device)) y_2 = model(x.to(device ).flip(-1)) y_preds =( y_1.sigmoid().detach().cpu().numpy() + y_2.sigmoid().detach().cpu().numpy() )/ 2 pred_list.append(y_preds) pred_arr = np.concatenate(pred_list) del pred_list return pred_arr<set_options>
# Continue training CNN_2 on augmented batches; validation uses clean images.
history_CNN_2_datagen = CNN_2.fit( datagen.flow(X_train,y_train,batch_size=batch_size), epochs=epochs, shuffle=True, validation_data=(X_val,y_val), verbose=2, callbacks=[early_stopping, lrr], steps_per_epoch=steps_per_epoch )
Digit Recognizer
17,484,229
if not torch.cuda.is_available() : device = torch.device("cpu") else: device = torch.device("cuda") print(device )<load_pretrained>
# Class predictions on the validation split (argmax over softmax outputs).
val_predictions = CNN_2.predict(X_val) y_pred = val_predictions.argmax(axis=-1 )
Digit Recognizer
17,484,229
model_dir = TRAINED_MODEL test_dir = TEST_RESIZED test_file_list = [ (test_dir / f"{img_id}.png", [-1] * 11) for img_id in smpl_sub["StudyInstanceUID"].values ] test_loader = get_dataloaders_for_inference(test_file_list, batch_size=64) test_preds_arr = np.zeros(( N_FOLD, len(smpl_sub), N_CLASSES)) for fold_id in FOLDS: print(f"[fold {fold_id}]") stgs = load_setting_file(model_dir / f"fold{fold_id}" / "settings.yml") stgs["model"]["params"]["pretrained"] = False model = MultiHeadResNet200D(**stgs["model"]["params"]) model_path = model_dir / f"best_model_fold{fold_id}.pth" model.load_state_dict(torch.load(model_path, map_location=device)) test_pred = run_inference_loop(stgs, model, test_loader, device) test_preds_arr[fold_id] = test_pred del model torch.cuda.empty_cache() gc.collect()<feature_engineering>
# Final test-set class predictions.
predictions = CNN_2.predict(test_set) results = predictions.argmax(axis=-1 )
Digit Recognizer
17,484,229
sub = smpl_sub.copy() sub[CLASSES] =( 0.50 *(test_preds_arr.argsort(axis=1 ).argsort(axis=1 ).mean(axis=0)) + 0.30 *(predictions200d.argsort(axis=0 ).argsort(axis=0)) + 0.08 *(predictions152d.argsort(axis=0 ).argsort(axis=0)) + 0.12 *(predictionsB5.argsort(axis=0 ).argsort(axis=0)) ) sub.to_csv("submission.csv", index=False )<install_modules>
# Assemble the Kaggle submission: ImageId 1..28000 with predicted labels.
result = pd.DataFrame({
    'ImageId': list(range(1, 28001)),
    'Label': results,
})
result.to_csv("output.csv", index=False)
Digit Recognizer
20,482,624
# Install timm (IPython shell escape; fails silently when offline).
!pip install timm
# Load the digit-recognizer train/test CSVs.
train_data = pd.read_csv('.. /input/digit-recognizer/train.csv') test_data = pd.read_csv('.. /input/digit-recognizer/test.csv' )
Digit Recognizer
20,482,624
sys.path.append('.. /input/pytorch-images-seresnet') warnings.filterwarnings('ignore') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') IMAGE_SIZE = 640 BATCH_SIZE = 128 TEST_PATH = '.. /input/ranzcr-clip-catheter-line-classification/test' MODEL_PATH_resnet200d = '.. /input/resnet200d-public/resnet200d_320_CV9632.pth' MODEL_PATH_seresnet152d = '.. /input/seresnet152d-cv9615/seresnet152d_320_CV96.15.pth' test = pd.read_csv('.. /input/ranzcr-clip-catheter-line-classification/sample_submission.csv') class TestDataset(Dataset): def __init__(self, df, transform=None): self.df = df self.file_names = df['StudyInstanceUID'].values self.transform = transform def __len__(self): return len(self.df) def __getitem__(self, idx): file_name = self.file_names[idx] file_path = f'{TEST_PATH}/{file_name}.jpg' image = cv2.imread(file_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if self.transform: augmented = self.transform(image=image) image = augmented['image'] return image def get_transforms() : return Compose([ Resize(IMAGE_SIZE, IMAGE_SIZE), Normalize( ), ToTensorV2() , ]) class ResNet200D(nn.Module): def __init__(self, model_name='resnet200d_320'): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.fc.in_features self.model.global_pool = nn.Identity() self.model.fc = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(n_features, 11) def forward(self, x): bs = x.size(0) features = self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.fc(pooled_features) return output class SeResNet152D(nn.Module): def __init__(self, model_name='seresnet152d_320'): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.fc.in_features self.model.global_pool = nn.Identity() self.model.fc = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(n_features, 11) def forward(self, x): bs = x.size(0) features 
= self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.fc(pooled_features) return output def inference(models, test_loader, device): tk0 = tqdm(enumerate(test_loader), total=len(test_loader)) probs = [] for i,(images)in tk0: images = images.to(device) avg_preds = [] for model in models: with torch.no_grad() : y_preds1 = model(images) y_preds2 = model(images.flip(-1)) y_preds =(y_preds1.sigmoid().to('cpu' ).numpy() + y_preds2.sigmoid().to('cpu' ).numpy())/ 2 avg_preds.append(y_preds) avg_preds = np.mean(avg_preds, axis=0) probs.append(avg_preds) probs = np.concatenate(probs) return probs models200D = [] model = ResNet200D() model.load_state_dict(torch.load(MODEL_PATH_resnet200d)['model']) model.eval() model.to(device) models200D.append(model) models152D = [] model = SeResNet152D() model.load_state_dict(torch.load(MODEL_PATH_seresnet152d)['model']) model.eval() model.to(device) models152D.append(model) test_dataset = TestDataset(test, transform=get_transforms()) test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4 , pin_memory=True) predictions200d = inference(models200D, test_loader, device) predictions152d = inference(models152D, test_loader, device) predictions =(2 * predictions200d + predictions152d)/ 3.0 target_cols = test.iloc[:, 1:12].columns.tolist() test[target_cols] = predictions test[['StudyInstanceUID'] + target_cols].to_csv('submission.csv', index=False) test.head() <load_from_csv>
# Split features from labels and free the raw frame to reclaim memory.
X = train_data.drop(['label'], axis=1) y = train_data['label'] del train_data
Digit Recognizer
20,482,624
# Inference configuration: 5-fold resnet200d ensemble with TTA.  `submit`
# detects whether this run sees the public (3582-row) test set; `fast_sub`
# reuses a precomputed submission instead of re-running inference.
batch_size = 1 image_size = 512 tta = True submit =(len(pd.read_csv('.. /input/ranzcr-clip-catheter-line-classification/sample_submission.csv')) == 3582) enet_type = ['resnet200d']*5 model_path = [ '.. /input/resnet200d-baseline-benchmark-public/resnet200d_fold0_cv953.pth', '.. /input/resnet200d-baseline-benchmark-public/resnet200d_fold1_cv955.pth', '.. /input/resnet200d-baseline-benchmark-public/resnet200d_fold2_cv955.pth', '.. /input/resnet200d-baseline-benchmark-public/resnet200d_fold3_cv957.pth', '.. /input/resnet200d-baseline-benchmark-public/resnet200d_fold4_cv954.pth' ] fast_sub = True fast_sub_path = '.. /input/submissions/submission_RANZCR_CLiP.csv'
# Scale to [0, 1] and reshape flat rows into (28, 28, 1) image arrays.
X = X / 255.; test_data = test_data / 255. X = X.values.reshape(-1, 28, 28, 1) test_data = test_data.values.reshape(-1, 28, 28, 1 )
Digit Recognizer
20,482,624
# Make the offline timm packages importable and pick the compute device
# (CPU is forced when DEBUG is set).
sys.path.append('.. /input/pytorch-image-models/pytorch-image-models-master') sys.path.append('.. /input/timm-pytorch-image-models/pytorch-image-models-master') DEBUG = False %matplotlib inline device = torch.device('cuda')if not DEBUG else torch.device('cpu') print(device )
import tensorflow as tf
Digit Recognizer
20,482,624
class RANZCRResNet200D(nn.Module): def __init__(self, model_name='resnet200d', out_dim=11, pretrained=False): super().__init__() self.model = timm.create_model(model_name, pretrained=False) n_features = self.model.fc.in_features self.model.global_pool = nn.Identity() self.model.fc = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(n_features, out_dim) def forward(self, x): bs = x.size(0) features = self.model(x) pooled_features = self.pooling(features ).view(bs, -1) output = self.fc(pooled_features) return output<concatenate>
model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[28, 28, 1]), tf.keras.layers.BatchNormalization() , tf.keras.layers.MaxPool2D(pool_size=2, strides=2), tf.keras.layers.Conv2D(filters=64, kernel_size=3, activation='relu'), tf.keras.layers.BatchNormalization() , tf.keras.layers.MaxPool2D(pool_size=2, strides=2), tf.keras.layers.Flatten() , tf.keras.layers.Dense(units=256, activation='relu'), tf.keras.layers.Dropout (.15), tf.keras.layers.Dense(units=128, activation='relu'), tf.keras.layers.Dropout (.1), tf.keras.layers.Dense(units=64, activation='relu'), tf.keras.layers.Dense(units=10, activation='softmax') ]) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) learning_rate_reduction = tf.keras.callbacks.ReduceLROnPlateau( monitor='acc', patience=5, factor=0.5, min_lr=0.00001 )
Digit Recognizer
20,482,624
transforms_test = albumentations.Compose([ Resize(image_size, image_size), Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], ), ToTensorV2() ] )<load_from_csv>
y = tf.keras.utils.to_categorical(y, num_classes=10) datagen = tf.keras.preprocessing.image.ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X) Xtrain, Xvalid, ytrain, yvalid = train_test_split(X, y, test_size=.1, random_state=333 )
Digit Recognizer