Akyl committed on
Commit
5a1a242
Β·
1 Parent(s): e778278

fix issue 2

Browse files
Files changed (1) hide show
  1. app.py +31 -26
app.py CHANGED
@@ -7,24 +7,30 @@ from timeit import default_timer as timer
7
  from typing import Tuple, Dict
8
 
9
  # Setup class names
10
- class_names = ['pizza', 'steak', 'sushi']
11
 
12
  ### 2. Model and transforms preparation ###
 
13
  # Create EffNetB2 model
14
- effnetb2, effnetb2_transforms = create_effnetb2_model(len(class_names))
 
 
15
 
16
  # Load saved weights
17
  effnetb2.load_state_dict(
18
  torch.load(
19
- f='09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth',
20
- map_location=torch.device('cpu')
21
  )
22
  )
23
 
 
24
  ### 3. Predict function ###
 
25
  # Create predict function
26
  def predict(img) -> Tuple[Dict, float]:
27
- """Transforms and performs a prediction on img and returns prediction and time taken."""
 
28
  # Start the timer
29
  start_time = timer()
30
 
@@ -37,7 +43,7 @@ def predict(img) -> Tuple[Dict, float]:
37
  # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
38
  pred_probs = torch.softmax(effnetb2(img), dim=1)
39
 
40
- # Create a prediction label and prediction probability dictionary for each prediction class
41
  pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
42
 
43
  # Calculate the prediction time
@@ -46,29 +52,28 @@ def predict(img) -> Tuple[Dict, float]:
46
  # Return the prediction dictionary and prediction time
47
  return pred_labels_and_probs, pred_time
48
 
 
 
 
49
  # Create title, description and article strings
50
  title = "FoodVision Mini πŸ•πŸ₯©πŸ£"
51
  description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
52
  article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)."
53
 
54
- # Create examples list from 'examples/' directory
55
- example_list = [['examples/' + example] for example in os.listdir('examples')]
56
-
57
- # Create gradio demo
58
- demo = gr.Interface(
59
- fn=predict,
60
- inputs=gr.Image(type='pil'),
61
- outputs=[
62
- gr.Label(num_top_classes=3, label='Predictions'),
63
- gr.Number(label='Prediction time (s)')
64
- ],
65
- examples=example_list,
66
- title=title,
67
- description=description,
68
- article=article
69
- )
70
-
71
- # Launch the demo
72
  demo.launch()
73
-
74
-
 
7
  from typing import Tuple, Dict
8
 
9
  # Setup class names
10
+ class_names = ["pizza", "steak", "sushi"]
11
 
12
  ### 2. Model and transforms preparation ###
13
+
14
  # Create EffNetB2 model
15
+ effnetb2, effnetb2_transforms = create_effnetb2_model(
16
+ num_classes=3, # len(class_names) would also work
17
+ )
18
 
19
  # Load saved weights
20
  effnetb2.load_state_dict(
21
  torch.load(
22
+ f="09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
23
+ map_location=torch.device("cpu"), # load to CPU
24
  )
25
  )
26
 
27
+
28
  ### 3. Predict function ###
29
+
30
  # Create predict function
31
  def predict(img) -> Tuple[Dict, float]:
32
+ """Transforms and performs a prediction on img and returns prediction and time taken.
33
+ """
34
  # Start the timer
35
  start_time = timer()
36
 
 
43
  # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
44
  pred_probs = torch.softmax(effnetb2(img), dim=1)
45
 
46
+ # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter)
47
  pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
48
 
49
  # Calculate the prediction time
 
52
  # Return the prediction dictionary and prediction time
53
  return pred_labels_and_probs, pred_time
54
 
55
+
56
+ ### 4. Gradio app ###
57
+
58
  # Create title, description and article strings
59
  title = "FoodVision Mini πŸ•πŸ₯©πŸ£"
60
  description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
61
  article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)."
62
 
63
+ # Create examples list from "examples/" directory
64
+ example_list = [["examples/" + example] for example in os.listdir("examples")]
65
+
66
+ # Create the Gradio demo
67
+ demo = gr.Interface(fn=predict, # mapping function from input to output
68
+ inputs=gr.Image(type="pil"), # what are the inputs?
69
+ outputs=[gr.Label(num_top_classes=3, label="Predictions"), # what are the outputs?
70
+ gr.Number(label="Prediction time (s)")],
71
+ # our fn has two outputs, therefore we have two outputs
72
+ # Create examples list from "examples/" directory
73
+ examples=example_list,
74
+ title=title,
75
+ description=description,
76
+ article=article)
77
+
78
+ # Launch the demo!
 
 
79
  demo.launch()