Merge branch 'main' of https://huggingface.co/imessam/OrientationClassifier
- Coding_Challenge_for_Fatima_Fellowship.html +0 -0
- README.md +15 -1
- model.py +0 -44
- transforms.py +0 -32
- utils.py +0 -203
Coding_Challenge_for_Fatima_Fellowship.html DELETED
The diff for this file is too large to render.
README.md CHANGED
@@ -1 +1,15 @@
-# Orientation Classifier
+# Orientation Classifier
+---
+
+language:
+- en
+tags:
+- image-classification
+license: apache-2.0
+datasets:
+- cifar10
+metrics:
+- accuracy
+- f1
+
+---
model.py DELETED
@@ -1,44 +0,0 @@
-import os
-import numpy as np
-import tensorflow as tf
-from tensorflow.keras.layers import Dense, BatchNormalization, GlobalAveragePooling2D
-
-# Uncomment to force CPU-only execution:
-# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
-
-
-class OrientationClassifier(tf.keras.Model):
-    """ResNet50 backbone plus a dense head for binary flip classification."""
-
-    def __init__(self, input_shape, hidden_dim, no_classes=1, isTrainable=False):
-        super().__init__()
-        # Pretrained ResNet50 feature extractor, frozen unless isTrainable=True.
-        resnet = tf.keras.applications.resnet50.ResNet50(include_top=False, input_shape=input_shape)
-        resnet.trainable = isTrainable
-
-        self.featureExtractor = tf.keras.models.Sequential([
-            resnet,
-            GlobalAveragePooling2D()
-        ])
-
-        # Dense expects integer unit counts, so halve with floor division.
-        self.classifier = tf.keras.models.Sequential([
-            Dense(hidden_dim, activation='relu'),
-            BatchNormalization(),
-
-            Dense(hidden_dim // 2, activation='relu'),
-            BatchNormalization(),
-
-            Dense(hidden_dim // 4, activation='relu'),
-            BatchNormalization(),
-
-            Dense(no_classes, activation='sigmoid')
-        ])
-
-    def call(self, inputs):
-        x = self.featureExtractor(inputs)
-        preds = self.classifier(x)
-        return preds
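For context on what was removed, here is a minimal usage sketch of the deleted OrientationClassifier. The input shape (matching the 100x100 resize in the deleted transforms.py) and hidden_dim=256 are illustrative assumptions, not values recorded in this commit.

import tensorflow as tf

# Hypothetical usage of the deleted class; input_shape and hidden_dim are assumed.
model = OrientationClassifier(input_shape=(100, 100, 3), hidden_dim=256)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

# A dummy forward pass builds the weights so summary() can report shapes.
_ = model(tf.zeros((1, 100, 100, 3)))
model.summary()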
transforms.py DELETED
@@ -1,32 +0,0 @@
-import tensorflow as tf
-import numpy as np
-
-
-def normalize(image, label):
-    # Scale pixel values from [0, 255] into [0, 1].
-    return tf.cast(image, tf.float32) / 255., label
-
-
-def resize(image, label):
-    return tf.image.resize(image, size=(100, 100)), label
-
-
-def flip(image, label):
-    # Randomly flip the image upside down; the flip decision becomes the new label.
-    new_label = tf.random.uniform(shape=(), minval=0, maxval=2, dtype=tf.int32)
-
-    # Branch explicitly with tf.cond so the tensor-valued condition also
-    # works when this function is traced inside a tf.data map.
-    img = tf.cond(tf.equal(new_label, 1),
-                  lambda: tf.image.flip_up_down(image),
-                  lambda: image)
-
-    return img, new_label
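A minimal sketch of how these deleted transforms chain together over CIFAR-10 with tf.data, mirroring the map-based pipeline in the deleted utils.prepareData; loading through tensorflow_datasets and the batch size are assumptions.

import tensorflow as tf
import tensorflow_datasets as tfds

# Hypothetical pipeline: resize, rescale, then randomly flip and relabel.
ds = tfds.load("cifar10", split="train", as_supervised=True)
for t in (resize, normalize, flip):
    ds = ds.map(t, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.batch(32).prefetch(tf.data.AUTOTUNE)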
utils.py DELETED
@@ -1,203 +0,0 @@
-import numpy as np
-import tensorflow as tf
-import matplotlib.pyplot as plt
-import gc
-
-
-classes = ["UN-FLIPPED", "FLIPPED"]
-
-
-def showSample(data):
-    # Display the first four images with their class names.
-    for i, (image, label) in enumerate(data.unbatch().as_numpy_iterator()):
-        print(classes[label])
-        plt.imshow(image)
-        plt.show()
-
-        if i == 3:
-            break
-
-    tf.keras.backend.clear_session()
-    gc.collect()
-
-
-def checkBalance(data):
-    # Count how many samples belong to each class.
-    labels = []
-    for i, (image, label) in enumerate(data.unbatch().as_numpy_iterator()):
-        labels.append(label)
-
-    values, count = np.unique(np.array(labels), return_counts=True)
-    for i, value in enumerate(values):
-        print(f"{classes[value]} : {count[i]}")
-
-    tf.keras.backend.clear_session()
-    gc.collect()
-
-
-def prepareData(data, transforms, batch_size, isTrain=False, size=None):
-    ds = data
-    for transform in transforms:
-        ds = ds.map(transform)
-    ds = ds.cache()
-
-    # Shuffle individual examples before batching; shuffling after
-    # batch() would only reorder whole batches.
-    if isTrain:
-        ds = ds.shuffle(size)
-
-    ds = ds.batch(batch_size)
-    ds = ds.prefetch(tf.data.AUTOTUNE)
-
-    return ds
-
-
-def train(data, classifier, params, optimizer, loss, epochs, modelName="OrientationClassifier", callback=None, runs=3):
-    train, val = data
-
-    history = []
-    losses = []
-    accs = []
-
-    for i in range(runs):
-        print(f"######## run {i+1} ##########")
-
-        # Rebuild the callback list each run so checkpoint callbacks from
-        # earlier runs do not accumulate and overwrite each other's files.
-        callbacks = []
-        if callback is not None:
-            callbacks.append(callback)
-        callbacks.append(tf.keras.callbacks.ModelCheckpoint(
-            filepath=f"models/{modelName} run {(i+1)}",
-            save_weights_only=True,
-            monitor='val_loss',
-            mode='min',
-            save_best_only=True
-        ))
-
-        model = classifier(*params)
-        tf.random.set_seed(i+1)
-
-        model.compile(optimizer=optimizer, loss=loss, metrics="accuracy")
-        hist = model.fit(train, epochs=epochs, validation_data=val, callbacks=callbacks)
-
-        losses.append(np.amin(hist.history["val_loss"]))
-        accs.append(np.amax(hist.history["val_accuracy"]))
-        history.append(hist)
-        tf.keras.backend.clear_session()
-
-    # Reload the checkpoint from the run with the lowest validation loss.
-    idx = np.argmin(losses)
-    bestModel = model
-    bestModel.load_weights(f"models/{modelName} run {(idx+1)}")
-    loss, acc = bestModel.evaluate(val)
-
-    print(f"best model is on run {idx+1} with validation loss : {loss} and validation accuracy : {acc}")
-    del model
-
-    tf.keras.backend.clear_session()
-    gc.collect()
-
-    return history
-
-
-def plotHistory(histories):
-    for i, history in enumerate(histories):
-        print(f"history on run {i+1} : ")
-
-        # Summarize history for accuracy.
-        plt.plot(history.history['accuracy'])
-        plt.plot(history.history['val_accuracy'])
-        plt.title('model accuracy')
-        plt.ylabel('accuracy')
-        plt.xlabel('epoch')
-        plt.legend(['train', 'val'], loc='upper left')
-        plt.show()
-
-        # Summarize history for loss.
-        plt.plot(history.history['loss'])
-        plt.plot(history.history['val_loss'])
-        plt.title('model loss')
-        plt.ylabel('loss')
-        plt.xlabel('epoch')
-        plt.legend(['train', 'val'], loc='upper left')
-        plt.show()
-
-
-def getMetrics(true, preds, thresh=0.5):
-    TP = 0
-    TN = 0
-    FP = 0
-    FN = 0
-
-    # np.int was removed from NumPy; the builtin int works for the cast.
-    logits = np.array(preds > thresh, dtype=int)
-
-    for i, label in enumerate(iter(true)):
-        if label == 1:
-            if logits[i] == 1:
-                TP += 1
-            else:
-                FN += 1
-        else:
-            if logits[i] == 1:
-                FP += 1
-            else:
-                TN += 1
-
-    confusion_matrix = [[TP, FN], [FP, TN]]
-    acc = (TP + TN) / (TP + TN + FP + FN)
-    recall = TP / (TP + FN)
-    precision = TP / (TP + FP)
-    f1 = 2 * ((precision * recall) / (precision + recall))
-
-    s = ""
-    for i in range(2):
-        for j in range(2):
-            s += f"{confusion_matrix[i][j]}\t"
-        s += "\n"
-
-    print(f"Accuracy : {acc} , recall : {recall} , precision : {precision} , f1 : {f1} , Confusion Matrix : \n{s}")
-
-    return acc, confusion_matrix, recall, precision, f1
-
-
-def loadWeights(model, optimizer, loss, weights_path):
-    model.load_weights(weights_path)
-    model.compile(optimizer=optimizer, loss=loss)
-
-    return model
-
-
-def evaluate(model, data):
-    images = data.map(lambda image, label: image)
-    true = data.map(lambda image, label: label).unbatch()
-
-    preds = model.predict(images)
-
-    return getMetrics(true, preds)
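Putting the deleted helpers together, a hypothetical end-to-end run could have looked like the sketch below; the split sizes, batch size, hidden width, and epoch count are all assumptions for illustration, not values recorded in this commit.

import tensorflow_datasets as tfds

# Assumed 90/10 train/validation split of CIFAR-10.
train_raw = tfds.load("cifar10", split="train[:90%]", as_supervised=True)
val_raw = tfds.load("cifar10", split="train[90%:]", as_supervised=True)

transforms = [resize, normalize, flip]
train_ds = prepareData(train_raw, transforms, batch_size=32, isTrain=True, size=1000)
val_ds = prepareData(val_raw, transforms, batch_size=32)

histories = train(
    (train_ds, val_ds),
    OrientationClassifier,
    params=((100, 100, 3), 256),  # (input_shape, hidden_dim), assumed
    optimizer="adam",
    loss="binary_crossentropy",
    epochs=5,
)
plotHistory(histories)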