Spaces: Herta83 (Runtime error)

admin committed
Commit: 278c80b
Parent(s): 46493cf
Files changed (6)
  1. .gitattributes +10 -11
  2. .gitignore +4 -0
  3. README.md +3 -3
  4. app.py +101 -0
  5. model.py +158 -0
  6. requirements.txt +4 -0
.gitattributes CHANGED
@@ -1,35 +1,34 @@
  *.7z filter=lfs diff=lfs merge=lfs -text
  *.arrow filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
  *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
  *.ftz filter=lfs diff=lfs merge=lfs -text
  *.gz filter=lfs diff=lfs merge=lfs -text
  *.h5 filter=lfs diff=lfs merge=lfs -text
  *.joblib filter=lfs diff=lfs merge=lfs -text
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
  *.model filter=lfs diff=lfs merge=lfs -text
  *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
  *.onnx filter=lfs diff=lfs merge=lfs -text
  *.ot filter=lfs diff=lfs merge=lfs -text
  *.parquet filter=lfs diff=lfs merge=lfs -text
  *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
  *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
  *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
  *.xz filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *.tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.db* filter=lfs diff=lfs merge=lfs -text
+ *.ark* filter=lfs diff=lfs merge=lfs -text
+ **/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
+ **/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
+ **/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,4 @@
+ __pycache__/*
+ *.pth
+ flagged/*
+ rename.sh
README.md CHANGED
@@ -1,13 +1,13 @@
  ---
  title: SVHN Recognition
- emoji:
+ emoji: 🚪
  colorFrom: yellow
  colorTo: purple
  sdk: gradio
- sdk_version: 4.12.0
+ sdk_version: 4.36.0
  app_file: app.py
  pinned: false
  license: mit
  ---
  
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ This doorplate recognition model is a deep convolutional neural network implemented in PyTorch that reads multi-digit house numbers from street-view images. Trained on the SVHN dataset, extracted from Google Street View house numbers, it predicts the sequence of Arabic digits (0-9) in each image and reaches up to 89% accuracy on the test set. Upload an image containing a doorplate number and submit it to receive the recognized digits, a practical, user-friendly solution for image-based digit recognition.
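
Beyond the web UI, a Gradio Space of this kind can be queried programmatically. Below is a minimal sketch using gradio_client; the Space id, checkpoint filename, and image path are placeholders (none of them appear on this page), and the default /predict endpoint is assumed.

# All identifiers below are placeholders: substitute the real Space id,
# a checkpoint actually listed in the dropdown, and a local image path.
from gradio_client import Client, handle_file

client = Client("Herta83/SVHN-Recognition")  # hypothetical Space id
result = client.predict(
    handle_file("doorplate.png"),  # image input (filepath type)
    "model-122000.pth",            # checkpoint dropdown value (placeholder)
    api_name="/predict",
)
print(result)  # recognized digit string, e.g. "1839"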
app.py ADDED
@@ -0,0 +1,101 @@
+ import os
+ import torch
+ import random
+ import warnings
+ import gradio as gr
+ from PIL import Image
+ from model import Model
+ from torchvision import transforms
+ from modelscope import snapshot_download
+
+
+ MODEL_DIR = snapshot_download("MuGeminorum/svhn", cache_dir="./__pycache__")
+
+
+ def infer(input_img: str, checkpoint_file: str):
+     try:
+         model = Model()
+         model.restore(f"{MODEL_DIR}/{checkpoint_file}")
+         outstr = ""
+         with torch.no_grad():
+             transform = transforms.Compose(
+                 [
+                     transforms.Resize([64, 64]),
+                     transforms.CenterCrop([54, 54]),
+                     transforms.ToTensor(),
+                     transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
+                 ]
+             )
+             image = Image.open(input_img)
+             image = image.convert("RGB")
+             image = transform(image)
+             images = image.unsqueeze(dim=0)
+             (
+                 length_logits,
+                 digit1_logits,
+                 digit2_logits,
+                 digit3_logits,
+                 digit4_logits,
+                 digit5_logits,
+             ) = model.eval()(images)
+             length_prediction = length_logits.max(1)[1]
+             digit1_prediction = digit1_logits.max(1)[1]
+             digit2_prediction = digit2_logits.max(1)[1]
+             digit3_prediction = digit3_logits.max(1)[1]
+             digit4_prediction = digit4_logits.max(1)[1]
+             digit5_prediction = digit5_logits.max(1)[1]
+             output = [
+                 digit1_prediction.item(),
+                 digit2_prediction.item(),
+                 digit3_prediction.item(),
+                 digit4_prediction.item(),
+                 digit5_prediction.item(),
+             ]
+
+             for i in range(length_prediction.item()):
+                 outstr += str(output[i])
+
+         return outstr
+
+     except Exception as e:
+         return f"{e}"
+
+
+ def get_files(dir_path=MODEL_DIR, ext=".pth"):
+     files_and_folders = os.listdir(dir_path)
+     outputs = []
+     for file in files_and_folders:
+         if file.endswith(ext):
+             outputs.append(file)
+
+     return outputs
+
+
+ if __name__ == "__main__":
+     warnings.filterwarnings("ignore")
+     models = get_files()
+     images = get_files(f"{MODEL_DIR}/examples", ".png")
+     samples = []
+     for img in images:
+         samples.append(
+             [
+                 f"{MODEL_DIR}/examples/{img}",
+                 models[random.randint(0, len(models) - 1)],
+             ]
+         )
+
+     gr.Interface(
+         fn=infer,
+         inputs=[
+             gr.Image(label="上传图片 Upload an image", type="filepath"),
+             gr.Dropdown(
+                 label="选择权重 Select a model",
+                 choices=models,
+                 value=models[0],
+             ),
+         ],
+         outputs=gr.Textbox(label="识别结果 Recognition result", show_copy_button=True),
+         examples=samples,
+         allow_flagging="never",
+         cache_examples=False,
+     ).launch()
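
The same infer function can be exercised without launching the UI. A local smoke test might look like the sketch below, assuming the ModelScope snapshot downloads successfully. Note that app.py imports modelscope, which does not appear in requirements.txt further down, so it would have to be installed separately.

# Minimal local smoke test for infer(); assumes the snapshot contains
# at least one *.pth checkpoint and an examples/ folder with *.png files.
from app import MODEL_DIR, get_files, infer

checkpoints = get_files()                              # *.pth files in the snapshot
examples = get_files(f"{MODEL_DIR}/examples", ".png")  # bundled example images
if checkpoints and examples:
    result = infer(f"{MODEL_DIR}/examples/{examples[0]}", checkpoints[0])
    print(result)  # digit string on success, error text otherwise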
model.py ADDED
@@ -0,0 +1,158 @@
+ import os
+ import glob
+ import torch
+ import torch.jit
+ import torch.nn as nn
+
+
+ class Model(torch.jit.ScriptModule):
+     CHECKPOINT_FILENAME_PATTERN = "model-{}.pth"
+
+     __constants__ = [
+         "_hidden1",
+         "_hidden2",
+         "_hidden3",
+         "_hidden4",
+         "_hidden5",
+         "_hidden6",
+         "_hidden7",
+         "_hidden8",
+         "_hidden9",
+         "_hidden10",
+         "_features",
+         "_classifier",
+         "_digit_length",
+         "_digit1",
+         "_digit2",
+         "_digit3",
+         "_digit4",
+         "_digit5",
+     ]
+
+     def __init__(self):
+         super(Model, self).__init__()
+
+         self._hidden1 = nn.Sequential(
+             nn.Conv2d(in_channels=3, out_channels=48, kernel_size=5, padding=2),
+             nn.BatchNorm2d(num_features=48),
+             nn.ReLU(),
+             nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
+             nn.Dropout(0.2),
+         )
+         self._hidden2 = nn.Sequential(
+             nn.Conv2d(in_channels=48, out_channels=64, kernel_size=5, padding=2),
+             nn.BatchNorm2d(num_features=64),
+             nn.ReLU(),
+             nn.MaxPool2d(kernel_size=2, stride=1, padding=1),
+             nn.Dropout(0.2),
+         )
+         self._hidden3 = nn.Sequential(
+             nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, padding=2),
+             nn.BatchNorm2d(num_features=128),
+             nn.ReLU(),
+             nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
+             nn.Dropout(0.2),
+         )
+         self._hidden4 = nn.Sequential(
+             nn.Conv2d(in_channels=128, out_channels=160, kernel_size=5, padding=2),
+             nn.BatchNorm2d(num_features=160),
+             nn.ReLU(),
+             nn.MaxPool2d(kernel_size=2, stride=1, padding=1),
+             nn.Dropout(0.2),
+         )
+         self._hidden5 = nn.Sequential(
+             nn.Conv2d(in_channels=160, out_channels=192, kernel_size=5, padding=2),
+             nn.BatchNorm2d(num_features=192),
+             nn.ReLU(),
+             nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
+             nn.Dropout(0.2),
+         )
+         self._hidden6 = nn.Sequential(
+             nn.Conv2d(in_channels=192, out_channels=192, kernel_size=5, padding=2),
+             nn.BatchNorm2d(num_features=192),
+             nn.ReLU(),
+             nn.MaxPool2d(kernel_size=2, stride=1, padding=1),
+             nn.Dropout(0.2),
+         )
+         self._hidden7 = nn.Sequential(
+             nn.Conv2d(in_channels=192, out_channels=192, kernel_size=5, padding=2),
+             nn.BatchNorm2d(num_features=192),
+             nn.ReLU(),
+             nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
+             nn.Dropout(0.2),
+         )
+         self._hidden8 = nn.Sequential(
+             nn.Conv2d(in_channels=192, out_channels=192, kernel_size=5, padding=2),
+             nn.BatchNorm2d(num_features=192),
+             nn.ReLU(),
+             nn.MaxPool2d(kernel_size=2, stride=1, padding=1),
+             nn.Dropout(0.2),
+         )
+         self._hidden9 = nn.Sequential(nn.Linear(192 * 7 * 7, 3072), nn.ReLU())
+         self._hidden10 = nn.Sequential(nn.Linear(3072, 3072), nn.ReLU())
+
+         self._digit_length = nn.Sequential(nn.Linear(3072, 7))
+         self._digit1 = nn.Sequential(nn.Linear(3072, 11))
+         self._digit2 = nn.Sequential(nn.Linear(3072, 11))
+         self._digit3 = nn.Sequential(nn.Linear(3072, 11))
+         self._digit4 = nn.Sequential(nn.Linear(3072, 11))
+         self._digit5 = nn.Sequential(nn.Linear(3072, 11))
+
+     @torch.jit.script_method
+     def forward(self, x):
+         x = self._hidden1(x)
+         x = self._hidden2(x)
+         x = self._hidden3(x)
+         x = self._hidden4(x)
+         x = self._hidden5(x)
+         x = self._hidden6(x)
+         x = self._hidden7(x)
+         x = self._hidden8(x)
+         x = x.view(x.size(0), 192 * 7 * 7)
+         x = self._hidden9(x)
+         x = self._hidden10(x)
+
+         length_logits = self._digit_length(x)
+         digit1_logits = self._digit1(x)
+         digit2_logits = self._digit2(x)
+         digit3_logits = self._digit3(x)
+         digit4_logits = self._digit4(x)
+         digit5_logits = self._digit5(x)
+
+         return (
+             length_logits,
+             digit1_logits,
+             digit2_logits,
+             digit3_logits,
+             digit4_logits,
+             digit5_logits,
+         )
+
+     def store(self, path_to_dir, step, maximum=5):
+         path_to_models = glob.glob(
+             os.path.join(path_to_dir, Model.CHECKPOINT_FILENAME_PATTERN.format("*"))
+         )
+         if len(path_to_models) == maximum:
+             min_step = min(
+                 [
+                     int(os.path.basename(path_to_model)[6:-4])
+                     for path_to_model in path_to_models
+                 ]
+             )
+             path_to_min_step_model = os.path.join(
+                 path_to_dir, Model.CHECKPOINT_FILENAME_PATTERN.format(min_step)
+             )
+             os.remove(path_to_min_step_model)
+
+         path_to_checkpoint_file = os.path.join(
+             path_to_dir, Model.CHECKPOINT_FILENAME_PATTERN.format(step)
+         )
+         torch.save(self.state_dict(), path_to_checkpoint_file)
+         return path_to_checkpoint_file
+
+     def restore(self, path_to_checkpoint_file):
+         self.load_state_dict(
+             torch.load(path_to_checkpoint_file, map_location=torch.device("cpu"))
+         )
+         step = int(path_to_checkpoint_file.split("model-")[-1][:-4])
+         return step
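
The eight convolutional blocks alternate stride-2 and stride-1 max-pooling, each with padding 1, so the 54x54 center crop used in app.py shrinks to a 7x7 map with 192 channels, exactly the 192 * 7 * 7 flatten before the linear layers. The seven length classes presumably cover sequences of zero to five digits plus an over-length class, following the standard multi-digit SVHN setup. A quick shape check with random weights, as a sketch:

# Sanity-check the forward geometry with an untrained model.
import torch
from model import Model

model = Model().eval()
dummy = torch.randn(1, 3, 54, 54)  # matches the CenterCrop size in app.py
with torch.no_grad():
    length_logits, *digit_logits = model(dummy)

print(length_logits.shape)              # torch.Size([1, 7]): length classes
print([t.shape for t in digit_logits])  # five tensors of torch.Size([1, 11])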
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ gradio
+ pillow
+ torch
+ torchvision