Cherryblade29 committed on
Commit
981c340
1 Parent(s): 732b792

Upload 9 files

Files changed (9)
  1. 1001116.jpg +0 -0
  2. 100274.jpg +0 -0
  3. 1203702.jpg +0 -0
  4. Deplaoy torch model.ipynb +0 -0
  5. README.md +11 -1
  6. app.py +69 -0
  7. effnetb2.pth +3 -0
  8. model.py +31 -0
  9. requirements.txt +3 -0
1001116.jpg ADDED
100274.jpg ADDED
1203702.jpg ADDED
Deplaoy torch model.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
README.md CHANGED
@@ -1,3 +1,13 @@
  ---
- license: openrail
+ title: FoodVision Mini
+ emoji: 📈
+ colorFrom: red
+ colorTo: yellow
+ sdk: gradio
+ sdk_version: 4.38.1
+ app_file: app.py
+ pinned: false
+ license: mit
  ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,66 @@
+ # 1. Imports and class names setup
+ import gradio as gr
+ import os
+ import torch
+
+ from model import create_effnetb2_model
+ from timeit import default_timer as timer
+ from typing import Tuple, Dict
+
+ # Setup class names
+ class_names = ['pizza', 'steak', 'sushi']
+
+ # Model and transforms preparation: create EffNetB2 model and its transforms
+ effnetb2, effnetb2_transforms = create_effnetb2_model(num_classes=len(class_names))
+
+ # Load saved weights onto the CPU (Spaces hardware may not have a GPU)
+ effnetb2.load_state_dict(torch.load("effnetb2.pth", map_location=torch.device('cpu')))
+
+ # Predict function
+ def predict(img) -> Tuple[Dict, float]:
+     """
+     Transforms and performs a prediction on img and returns the prediction and time taken.
+     """
+     # Start timer
+     start_time = timer()
+
+     # Transform the target image and add a batch dimension
+     img = effnetb2_transforms(img).unsqueeze(0)
+
+     # Put model into evaluation mode and turn on inference mode
+     effnetb2.eval()
+     with torch.inference_mode():
+         # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
+         pred_probs = torch.softmax(effnetb2(img), dim=1)
+
+     # Create a prediction label and prediction probability dictionary
+     pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
+
+     # Calculate prediction time
+     pred_time = round(timer() - start_time, 5)
+
+     # Return the prediction dictionary and prediction time
+     return pred_labels_and_probs, pred_time
+
+ ## Gradio app
+
+ # Create title, description and article strings
+ title = "FoodVision Mini 🍕🥩🍣"
+ description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
+ article = "Created "
+
+ # Create examples list from "examples/" directory
+ #example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+ # Create the Gradio demo
+ demo = gr.Interface(fn=predict,  # mapping function from input to output
+                     inputs=gr.Image(type="pil"),  # what are the inputs?
+                     outputs=[gr.Label(num_top_classes=3, label="Predictions"),  # what are the outputs?
+                              gr.Number(label="Prediction time (s)")],  # our fn has two outputs, therefore two output components
+                     #examples=example_list,
+                     title=title,
+                     description=description,
+                     article=article)
+
+ # Launch the demo!
+ demo.launch()
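
The app above can be smoke-tested locally without starting the Gradio server. A minimal sketch (not part of the commit), assuming effnetb2.pth and one of the uploaded example images (here 1001116.jpg) sit in the working directory; it rebuilds the model the same way app.py does rather than importing app.py, since importing it would run demo.launch():

```python
# Local smoke test for the inference path in app.py (hypothetical, not in the commit).
import torch
from PIL import Image

from model import create_effnetb2_model

class_names = ['pizza', 'steak', 'sushi']

# Rebuild the model and load the committed weights, exactly as app.py does
model, transforms = create_effnetb2_model(num_classes=len(class_names))
model.load_state_dict(torch.load("effnetb2.pth", map_location=torch.device('cpu')))
model.eval()

# Transform one of the uploaded example images and add a batch dimension
batch = transforms(Image.open("1001116.jpg")).unsqueeze(0)

with torch.inference_mode():
    probs = torch.softmax(model(batch), dim=1)

print({class_names[i]: float(probs[0][i]) for i in range(len(class_names))})
```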
effnetb2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56ae45551ee9305afa43726d6802c0d97701a32641b6a8adab6c3e9e023018ae
+ size 31274298
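
Because effnetb2.pth is tracked with Git LFS, the commit stores only this pointer; the sha256 oid and byte size identify the actual blob. A small sketch (not part of the commit) to check that a locally downloaded checkpoint matches the pointer:

```python
# Verify a downloaded effnetb2.pth against the LFS pointer's oid and size (hypothetical helper).
import hashlib
import os

EXPECTED_OID = "56ae45551ee9305afa43726d6802c0d97701a32641b6a8adab6c3e9e023018ae"
EXPECTED_SIZE = 31274298  # bytes, from the pointer above

assert os.path.getsize("effnetb2.pth") == EXPECTED_SIZE, "size mismatch: got the pointer file instead of the blob?"

with open("effnetb2.pth", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert digest == EXPECTED_OID, "sha256 mismatch"
print("effnetb2.pth matches its LFS pointer")
```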
model.py ADDED
@@ -0,0 +1,34 @@
+ import torch
+ import torchvision
+ from torch import nn
+
+ def create_effnetb2_model(num_classes: int = 3,
+                           seed: int = 42):
+     """
+     Creates an EffNetB2 feature extractor model and its image transforms.
+
+     Args:
+         num_classes (int, optional): number of classes in the classifier head.
+             Defaults to 3.
+         seed (int, optional): random seed value. Defaults to 42.
+
+     Returns:
+         model (torch.nn.Module): EffNetB2 feature extractor model.
+         transforms (torchvision.transforms): EffNetB2 image transforms.
+     """
+     # Create EffNetB2 pretrained weights, transforms and model
+     weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
+     transforms = weights.transforms()
+     model = torchvision.models.efficientnet_b2(weights=weights)
+
+     # Freeze all layers in the base model
+     for param in model.parameters():
+         param.requires_grad = False
+
+     # Change the classifier head (with a random seed for reproducibility)
+     torch.manual_seed(seed)
+     model.classifier = nn.Sequential(
+         nn.Dropout(p=0.2, inplace=True),
+         nn.Linear(in_features=1408, out_features=num_classes, bias=True)
+     )
+     return model, transforms
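
Because create_effnetb2_model freezes the base model before swapping in a new head, only the 1408 → num_classes linear layer (plus its bias) should require gradients. A quick usage sketch (not part of the commit) that confirms this:

```python
# Sanity-check the freeze-then-replace-head pattern in model.py (hypothetical, not in the commit).
from model import create_effnetb2_model

model, transforms = create_effnetb2_model(num_classes=3, seed=42)

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())

print(f"trainable params: {trainable:,} of {total:,}")  # expected trainable: 1408*3 + 3 = 4,227
print(transforms)  # the resize/crop/normalize pipeline tied to the pretrained weights
```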
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ gradio==3.21.0
+ torch==1.13.1
+ torchvision==0.14.1
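
Note: on Spaces the Gradio runtime is set by the sdk_version in the README front matter (4.38.1 above), so the older gradio==3.21.0 pin here is likely stale and matters mainly for local runs (pip install -r requirements.txt); reconciling the two versions would avoid surprises.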