rgny committed on
Commit b56a0a9 · verified · 1 Parent(s): f2c50d5

Update app.py

Files changed (1)
  app.py +70 -21
app.py CHANGED
@@ -1,33 +1,82 @@
+
+
+
+
+
+ ### 1. Imports and class names setup ###
  import gradio as gr
+ import os
  import torch
- import torchvision
+
  from model import create_model
  from timeit import default_timer as timer
+ from typing import Tuple, Dict
+
+ # Setup class names
+ class_names = ["pizza", "steak", "sushi"]
+
+ ### 2. Model and transforms preparation ###

- model,transform=create_model()
- model=model.to("cpu")
- model.load_state_dict(
+ # Create EffNetB2 model
+ effnetb2, effnetb2_transforms = create_model(
+     num_classes=3, # len(class_names) would also work
+ )
+
+ # Load saved weights
+ effnetb2.load_state_dict(
      torch.load(
-         f="deneme_modeli.pth",
+         f="09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
          map_location=torch.device("cpu"), # load to CPU
      )
  )

- class_names = ["pizza","steak","sushi"]
- def predict(image):
-     start=timer()
-     image=transform(image).unsqueeze(0).to("cpu")
-     output=model(image)
-     preds={class_names[i]:torch.softmax(output,dim=1)[0][i].item() for i in range(len(class_names))}
-     time=timer()-start
-     return preds,round(time,3)
-
- inputs = gr.Image(type="pil", label = "Resim")
- outputs = [gr.Label(num_top_classes=3,label="Tahminler"),gr.Number(label="Süre")]
- demo=gr.Interface(fn = predict,
-                   inputs=inputs,
-                   outputs=outputs,
-                   examples=[["examples/673127.jpg"],["examples/690177.jpg"]],
-                   title="Yeni Model")
+ ### 3. Predict function ###
+
+ # Create predict function
+ def predict(img) -> Tuple[Dict, float]:
+     """Transforms and performs a prediction on img and returns prediction and time taken.
+     """
+     # Start the timer
+     start_time = timer()
+
+     # Transform the target image and add a batch dimension
+     img = effnetb2_transforms(img).unsqueeze(0)
+
+     # Put model into evaluation mode and turn on inference mode
+     effnetb2.eval()
+     with torch.inference_mode():
+         # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
+         pred_probs = torch.softmax(effnetb2(img), dim=1)
+
+     # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter)
+     pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
+
+     # Calculate the prediction time
+     pred_time = round(timer() - start_time, 5)
+
+     # Return the prediction dictionary and prediction time
+     return pred_labels_and_probs, pred_time
+
+ ### 4. Gradio app ###
+
+ # Create title, description and article strings
+ title = "FoodVision Mini 🍕🥩🍣"
+ description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
+ article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)."
+
+ # Create examples list from "examples/" directory
+ example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+ # Create the Gradio demo
+ demo = gr.Interface(fn=predict, # mapping function from input to output
+                     inputs=gr.Image(type="pil"), # what are the inputs?
+                     outputs=[gr.Label(num_top_classes=3, label="Predictions"), # what are the outputs?
+                              gr.Number(label="Prediction time (s)")], # our fn has two outputs, therefore we have two outputs
+                     # Create examples list from "examples/" directory
+                     examples=example_list,
+                     title=title,
+                     description=description,
+                     article=article)

+ # Launch the demo!
  demo.launch()
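
The updated app imports create_model from a local model.py that is not part of this commit. For context, here is a minimal sketch of what that helper presumably looks like, following the standard torchvision EfficientNet-B2 feature-extractor recipe; the frozen backbone, dropout value, and 1408-unit classifier input are assumptions about this repo, not code taken from it:

# model.py -- hypothetical sketch, not part of this commit
import torch
import torchvision
from torch import nn

def create_model(num_classes: int = 3, seed: int = 42):
    """Creates an EfficientNet-B2 feature extractor and its matching transforms."""
    # Pretrained weights and the preprocessing pipeline they expect
    weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
    transforms = weights.transforms()
    model = torchvision.models.efficientnet_b2(weights=weights)

    # Freeze the backbone so only the new head would train
    for param in model.parameters():
        param.requires_grad = False

    # Swap the classifier head for the target classes
    # (1408 is the in_features of EfficientNet-B2's final linear layer)
    torch.manual_seed(seed)
    model.classifier = nn.Sequential(
        nn.Dropout(p=0.3, inplace=True),
        nn.Linear(in_features=1408, out_features=num_classes),
    )
    return model, transforms

To run the app locally, app.py, model.py, the .pth weights file named in the diff, and an examples/ directory containing at least one image need to sit in the same folder; python app.py then serves the Gradio demo via demo.launch().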