SWHL committed on
Commit
7e03ca9
1 Parent(s): a3569da

Update files

Browse files
Files changed (2) hide show
  1. app.py +27 -10
  2. images/ch_en_num.jpg +0 -0
app.py CHANGED
@@ -84,7 +84,24 @@ def visualize(image_path, boxes, txts, scores,
84
  return image_save
85
 
86
 
87
- def inference(img_path, box_thresh=0.5, unclip_ratio=1.6, text_score=0.5):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
88
  img = cv2.imread(img_path)
89
  ocr_result, _ = rapid_ocr(img, box_thresh=box_thresh,
90
  unclip_ratio=unclip_ratio,
@@ -99,8 +116,7 @@ def inference(img_path, box_thresh=0.5, unclip_ratio=1.6, text_score=0.5):
99
  return img_save_path, output_text
100
 
101
 
102
- rapid_ocr = RapidOCR()
103
- examples = [['images/1.jpg']]
104
 
105
  with gr.Blocks(title='RapidOCR') as demo:
106
  gr.Markdown("""
@@ -112,10 +128,8 @@ with gr.Blocks(title='RapidOCR') as demo:
112
  - **unclip_ratio**: 控制文本检测框的大小,值越大,检测框整体越大。在出现框截断文字的情况,调大该值。取值范围:[1.5, 2.0]
113
  - **text_score**: 文本识别结果是正确的置信度,值越大,显示出的识别结果更准确。存在漏检时,调低该值。取值范围:[0, 1.0]
114
  ### 运行环境:
115
- - Python: 3.8
116
- - onnxruntime: 1.14.1
117
- - rapidocr_onnxruntime: 1.2.5""")
118
- gr.Markdown('### 超参数调节')
119
  with gr.Row():
120
  box_thresh = gr.Slider(minimum=0, maximum=1.0, value=0.5,
121
  label='box_thresh', step=0.1,
@@ -130,7 +144,7 @@ with gr.Blocks(title='RapidOCR') as demo:
130
  interactive=True,
131
  info='[0, 1.0]')
132
 
133
- gr.Markdown('### 模型选择')
134
  with gr.Row():
135
  text_det = gr.Dropdown(['ch_PP-OCRv3_det_infer.onnx',
136
  'ch_PP-OCRv2_det_infer.onnx',
@@ -138,6 +152,7 @@ with gr.Blocks(title='RapidOCR') as demo:
138
  label='选择文本检测模型',
139
  value='ch_PP-OCRv3_det_infer.onnx',
140
  interactive=True)
 
141
  text_rec = gr.Dropdown(['ch_PP-OCRv3_rec_infer.onnx',
142
  'ch_PP-OCRv2_rec_infer.onnx',
143
  'ch_ppocr_server_v2.0_rec_infer.onnx'],
@@ -151,9 +166,11 @@ with gr.Blocks(title='RapidOCR') as demo:
151
  out_txt = gr.outputs.Textbox(type='text', label='RecText')
152
  button = gr.Button('Submit')
153
  button.click(fn=inference,
154
- inputs=[input_img, box_thresh, unclip_ratio, text_score],
 
155
  outputs=[out_img, out_txt])
156
  gr.Examples(examples=examples,
157
- inputs=[input_img, box_thresh, unclip_ratio, text_score],
 
158
  outputs=[out_img, out_txt], fn=inference)
159
  demo.launch(debug=True, enable_queue=True)
 
84
  return image_save
85
 
86
 
87
+ def inference(img_path, box_thresh=0.5, unclip_ratio=1.6, text_score=0.5,
88
+ text_det=None, text_rec=None):
89
+ det_model_path = str(Path('models') / 'text_det' / text_det)
90
+ rec_model_path = str(Path('models') / 'text_rec' / text_rec)
91
+ if 'v2' in rec_model_path:
92
+ rec_image_shape = [3, 32, 320]
93
+ else:
94
+ rec_image_shape = [3, 48, 320]
95
+
96
+ print('Init Class')
97
+ s = time.time()
98
+ rapid_ocr = RapidOCR(det_model_path=det_model_path,
99
+ rec_model_path=rec_model_path,
100
+ rec_img_shape=rec_image_shape)
101
+ print(det_model_path, rec_model_path, rec_image_shape)
102
+ elapse = time.time() - s
103
+ print(elapse)
104
+
105
  img = cv2.imread(img_path)
106
  ocr_result, _ = rapid_ocr(img, box_thresh=box_thresh,
107
  unclip_ratio=unclip_ratio,
 
116
  return img_save_path, output_text
117
 
118
 
119
+ examples = [['images/1.jpg'], ['images/ch_en_num.jpg']]
 
120
 
121
  with gr.Blocks(title='RapidOCR') as demo:
122
  gr.Markdown("""
 
128
  - **unclip_ratio**: 控制文本检测框的大小,值越大,检测框整体越大。在出现框截断文字的情况,调大该值。取值范围:[1.5, 2.0]
129
  - **text_score**: 文本识别结果是正确的置信度,值越大,显示出的识别结果更准确。存在漏检时,调低该值。取值范围:[0, 1.0]
130
  ### 运行环境:
131
+ Python: 3.8 | onnxruntime: 1.14.1 | rapidocr_onnxruntime: 1.2.5""")
132
+ gr.Markdown('**超参数调节**')
 
 
133
  with gr.Row():
134
  box_thresh = gr.Slider(minimum=0, maximum=1.0, value=0.5,
135
  label='box_thresh', step=0.1,
 
144
  interactive=True,
145
  info='[0, 1.0]')
146
 
147
+ gr.Markdown('**模型选择**')
148
  with gr.Row():
149
  text_det = gr.Dropdown(['ch_PP-OCRv3_det_infer.onnx',
150
  'ch_PP-OCRv2_det_infer.onnx',
 
152
  label='选择文本检测模型',
153
  value='ch_PP-OCRv3_det_infer.onnx',
154
  interactive=True)
155
+
156
  text_rec = gr.Dropdown(['ch_PP-OCRv3_rec_infer.onnx',
157
  'ch_PP-OCRv2_rec_infer.onnx',
158
  'ch_ppocr_server_v2.0_rec_infer.onnx'],
 
166
  out_txt = gr.outputs.Textbox(type='text', label='RecText')
167
  button = gr.Button('Submit')
168
  button.click(fn=inference,
169
+ inputs=[input_img, box_thresh, unclip_ratio, text_score,
170
+ text_det, text_rec],
171
  outputs=[out_img, out_txt])
172
  gr.Examples(examples=examples,
173
+ inputs=[input_img, box_thresh, unclip_ratio, text_score,
174
+ text_det, text_rec],
175
  outputs=[out_img, out_txt], fn=inference)
176
  demo.launch(debug=True, enable_queue=True)
images/ch_en_num.jpg ADDED