pavi156 committed on
Commit 4ee77f1 · 1 Parent(s): 797207c

Upload 3 files

Files changed (3)
  1. app.py +73 -0
  2. cifar_net.pth +3 -0
  3. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,73 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ import gradio as gr
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ import torchvision
+ import torchvision.transforms as transforms
+
+ # Small LeNet-style CNN; layer sizes must match the saved checkpoint (cifar_net.pth)
+ class Net(nn.Module):
+     def __init__(self):
+         super(Net, self).__init__()
+         self.conv1 = nn.Conv2d(3, 6, 5)
+         self.pool = nn.MaxPool2d(2, 2)
+         self.conv2 = nn.Conv2d(6, 16, 5)
+         # Fully connected layers sized to match the saved checkpoint
+         self.fc1 = nn.Linear(400, 120)
+         self.fc2 = nn.Linear(120, 84)
+         self.fc3 = nn.Linear(84, 10)
+
+     def forward(self, x):
+         x = self.pool(torch.relu(self.conv1(x)))
+         x = self.pool(torch.relu(self.conv2(x)))
+         x = x.view(x.shape[0], -1)  # flatten to (batch, 400)
+         x = torch.relu(self.fc1(x))
+         x = torch.relu(self.fc2(x))
+         x = self.fc3(x)
+         return x
+
+ # Load the trained model
+ model = Net()
+ model.load_state_dict(torch.load("cifar_net.pth"))
+ model.eval()
+
+ # Transformation applied to input images before inference
+ preprocess = transforms.Compose([
+     transforms.ToTensor(),
+     transforms.Resize((32, 32)),
+     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+ ])
+
+ # CIFAR-10 class names
+ classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
+
+ # Predict the class of a single input image
+ def classify_image(image):
+     img_tensor = preprocess(image)
+     img_tensor = img_tensor.unsqueeze(0)  # add a batch dimension
+     with torch.no_grad():
+         output = model(img_tensor)
+     _, predicted = torch.max(output, dim=1)
+     return classes[predicted[0]]  # return the predicted class name
+
+ # Create the Gradio interface
+ iface = gr.Interface(fn=classify_image, inputs="image", outputs="text")
+
+ # Launch the interface
+ iface.launch()
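The fully connected layer sizes above follow from the conv/pool arithmetic on a 32x32 CIFAR-10 input (32 → 28 → 14 → 10 → 5 spatially, with 16 channels, so 16 * 5 * 5 = 400 features into fc1). A minimal standalone check of that shape, assuming only torch is installed:

import torch
import torch.nn as nn

# Same conv/pool stack as Net, used only to confirm the flattened feature size (400)
features = nn.Sequential(
    nn.Conv2d(3, 6, 5), nn.MaxPool2d(2, 2),
    nn.Conv2d(6, 16, 5), nn.MaxPool2d(2, 2),
)
x = torch.zeros(1, 3, 32, 32)        # one dummy CIFAR-sized image
print(features(x).flatten(1).shape)  # torch.Size([1, 400]) -> matches nn.Linear(400, 120)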
cifar_net.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a39b10e26fc76a2d0f097dfd792ff7dac5a7c79ecbb1732017a726f3c1fafdc5
+ size 251167
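Since cifar_net.pth is tracked with Git LFS, only the pointer above lives in the repository; a fetched copy of the checkpoint can be checked against the recorded oid and size. A small sketch, assuming the real file has been downloaded into the working directory:

import hashlib
import os

path = "cifar_net.pth"
expected_oid = "a39b10e26fc76a2d0f097dfd792ff7dac5a7c79ecbb1732017a726f3c1fafdc5"
expected_size = 251167

with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

# Both checks should pass for the actual LFS object, not the pointer file itself
print(os.path.getsize(path) == expected_size)
print(digest == expected_oid)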
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ # Usage: pip install -r requirements.txt
+
+ gradio == 3.36.1
+ pillow == 9.5.0
+ torch == 2.0.1
+ torchvision == 0.15.2
+ image == 1.5.33
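A quick way to confirm the installed environment matches these pins (a standard-library sketch, not part of the upload):

from importlib.metadata import version

# Print the installed version of each pinned distribution
for pkg in ("gradio", "Pillow", "torch", "torchvision"):
    print(pkg, version(pkg))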