Solo448 committed on
Commit 1ea460d · verified · 1 Parent(s): b0a2b83

Delete working_ocr.py

Files changed (1)
  1. working_ocr.py +0 -282
working_ocr.py DELETED
@@ -1,282 +0,0 @@
- # -*- coding: utf-8 -*-
- """Working OCR.ipynb
- 
- Automatically generated by Colab.
- 
- Original file is located at
- https://colab.research.google.com/drive/1uAUzcENIQdIKo5G6ZIBdDcaeL0c_5Qkz
- """
- 
- # Colab-only setup. `!pip` is IPython shell magic and a syntax error in a
- # plain .py file, so these lines stay commented; run them in a notebook cell.
- #!pip install git+https://github.com/huggingface/transformers
- 
- #from google.colab import drive
- #drive.mount('/content/drive')
- 
- #!pip install --upgrade transformers
- #!pip install -q torch flash-attn qwen-vl-utils spaces gradio tiktoken verovio
- 
- import base64
- import io
- import json
- import os
- import shutil
- import tempfile
- import time
- import uuid
- from pathlib import Path
- 
- import gradio as gr
- import numpy as np
- import spaces
- import tiktoken  # installed for the GOT-OCR2 remote code
- import torch
- import verovio   # installed for the GOT-OCR2 remote code (render support)
- from PIL import Image
- from transformers import AutoModel, AutoProcessor, AutoTokenizer, Qwen2VLForConditionalGeneration
- 
- # GOT-OCR2 ships custom modeling code, hence trust_remote_code=True.
- model_name = "ucaslcl/GOT-OCR2_0"
- tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
- model = AutoModel.from_pretrained(model_name, trust_remote_code=True, low_cpu_mem_usage=True, device_map='cuda', use_safetensors=True).eval().cuda()
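- 
- # GOT-OCR2's remote code exposes chat() and chat_crop(); both are used in
- # run_GOT below, e.g.:
- #   model.chat(tokenizer, image_path, ocr_type='ocr')     # plain text
- #   model.chat(tokenizer, image_path, ocr_type='format')  # formatted, renderable output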
- 
- UPLOAD_FOLDER = "./uploads"
- RESULTS_FOLDER = "./results"
- 
- for folder in [UPLOAD_FOLDER, RESULTS_FOLDER]:
-     os.makedirs(folder, exist_ok=True)
- 
- def image_to_base64(image):
-     """Encode a PIL image as a base64 PNG string."""
-     buffered = io.BytesIO()
-     image.save(buffered, format="PNG")
-     return base64.b64encode(buffered.getvalue()).decode()
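- # e.g. image_to_base64(Image.open("page.png")) -> "iVBORw0KGgo..." ("page.png"
- # is illustrative); the helper is unused below but handy for embedding images in HTML.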
- 
- # Qwen2-VL handles the multilingual (English + Hindi) OCR mode.
- q_model_name = "Qwen/Qwen2-VL-2B-Instruct"
- q_model = Qwen2VLForConditionalGeneration.from_pretrained(q_model_name, torch_dtype="auto").cuda().eval()
- q_processor = AutoProcessor.from_pretrained(q_model_name, trust_remote_code=True)
- 
- def get_qwen_op(image_file, model, processor):
-     try:
-         image = Image.open(image_file).convert('RGB')
-         conversation = [
-             {
-                 "role": "user",
-                 "content": [
-                     {"type": "image"},
-                     {
-                         "type": "text",
-                         "text": "You are an accurate OCR engine. From the given image, extract the Hindi and other text."
-                     }
-                 ]
-             }
-         ]
-         text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
-         inputs = processor(text=[text_prompt], images=[image], padding=True, return_tensors="pt").to("cuda")
-         # Cast floating-point inputs to the model's dtype so fp16/bf16 weights
-         # do not clash with fp32 pixel values.
-         inputs = {k: v.to(model.dtype) if torch.is_floating_point(v) else v for k, v in inputs.items()}
- 
-         generation_config = {
-             "max_new_tokens": 1089,
-             "do_sample": False,  # greedy decoding; top_k/top_p/temperature would be ignored
-             "pad_token_id": processor.tokenizer.pad_token_id,
-             "eos_token_id": processor.tokenizer.eos_token_id,
-         }
- 
-         output_ids = model.generate(**inputs, **generation_config)
-         # Drop the prompt tokens so only newly generated text is decoded.
-         if 'input_ids' in inputs:
-             generated_ids = output_ids[:, inputs['input_ids'].shape[1]:]
-         else:
-             generated_ids = output_ids
- 
-         output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
- 
-         return output_text[0] if output_text else "No text extracted from the image."
- 
-     except Exception as e:
-         return f"An error occurred: {str(e)}"
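- # Called by run_GOT for the "English + Hindi(Qwen2-VL)" mode, e.g.
- # get_qwen_op(image_path, q_model, q_processor) -> extracted text or an error string.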
- 
- @spaces.GPU
- def run_GOT(image, got_mode, fine_grained_mode="", ocr_color="", ocr_box=""):
-     """Dispatch to GOT-OCR2 (or Qwen2-VL) and return (text, optional rendered HTML)."""
-     unique_id = str(uuid.uuid4())
-     image_path = os.path.join(UPLOAD_FOLDER, f"{unique_id}.png")
-     result_path = os.path.join(RESULTS_FOLDER, f"{unique_id}.html")
- 
-     shutil.copy(image, image_path)
- 
-     try:
-         if got_mode == "plain texts OCR":
-             res = model.chat(tokenizer, image_path, ocr_type='ocr')
-             return res, None
-         elif got_mode == "format texts OCR":
-             res = model.chat(tokenizer, image_path, ocr_type='format', render=True, save_render_file=result_path)
-         elif got_mode == "plain multi-crop OCR":
-             res = model.chat_crop(tokenizer, image_path, ocr_type='ocr')
-             return res, None
-         elif got_mode == "format multi-crop OCR":
-             res = model.chat_crop(tokenizer, image_path, ocr_type='format', render=True, save_render_file=result_path)
-         elif got_mode == "plain fine-grained OCR":
-             res = model.chat(tokenizer, image_path, ocr_type='ocr', ocr_box=ocr_box, ocr_color=ocr_color)
-             return res, None
-         elif got_mode == "format fine-grained OCR":
-             res = model.chat(tokenizer, image_path, ocr_type='format', ocr_box=ocr_box, ocr_color=ocr_color, render=True, save_render_file=result_path)
-         elif got_mode == "English + Hindi(Qwen2-VL)":
-             res = get_qwen_op(image_path, q_model, q_processor)
-             return res, None
-         # res_markdown = f"$$ {res} $$"
-         res_markdown = res
- 
-         # "format" modes render the result to HTML; embed it in a data-URI
-         # iframe and offer it as a download.
-         if "format" in got_mode and os.path.exists(result_path):
-             with open(result_path, 'r') as f:
-                 html_content = f.read()
-             encoded_html = base64.b64encode(html_content.encode('utf-8')).decode('utf-8')
-             iframe_src = f"data:text/html;base64,{encoded_html}"
-             iframe = f'<iframe src="{iframe_src}" width="100%" height="600px"></iframe>'
-             download_link = f'<a href="data:text/html;base64,{encoded_html}" download="result_{unique_id}.html">Download Full Result</a>'
-             return res_markdown, f"{download_link}<br>{iframe}"
-         else:
-             return res_markdown, None
-     except Exception as e:
-         return f"Error: {str(e)}", None
-     finally:
-         if os.path.exists(image_path):
-             os.remove(image_path)
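- 
- # Every branch of run_GOT yields (text, html_or_None); the pair feeds the
- # [ocr_result, html_result] outputs wired up in the UI below.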
- 
- def task_update(task):
-     """Show the fine-grained controls only when a fine-grained mode is selected."""
-     if "fine-grained" in task:
-         return [
-             gr.update(visible=True),
-             gr.update(visible=False),
-             gr.update(visible=False),
-         ]
-     else:
-         return [
-             gr.update(visible=False),
-             gr.update(visible=False),
-             gr.update(visible=False),
-         ]
- 
- def fine_grained_update(task):
-     if task == "box":
-         return [
-             gr.update(visible=False, value=""),
-             gr.update(visible=True),
-         ]
-     elif task == 'color':
-         return [
-             gr.update(visible=True),
-             gr.update(visible=False, value=""),
-         ]
- 
- def search_in_text(text, keywords):
-     """Searches for keywords within the text and highlights matches."""
- 
-     if not keywords:
-         return text
- 
-     highlighted_text = text
-     for keyword in keywords.split():
-         # Simple literal replacement; each match is wrapped in <mark>.
-         highlighted_text = highlighted_text.replace(keyword, f"<mark>{keyword}</mark>")
- 
-     return highlighted_text
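- # e.g. search_in_text("The cat sat.", "The") -> "<mark>The</mark> cat sat."
- # Matching is case-sensitive.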
- 
- def cleanup_old_files():
-     current_time = time.time()
-     for folder in [UPLOAD_FOLDER, RESULTS_FOLDER]:
-         for file_path in Path(folder).glob('*'):
-             if current_time - file_path.stat().st_mtime > 3600:  # 1 hour
-                 file_path.unlink()
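- # Invoked once at startup (see __main__ below) to purge uploads and results
- # older than an hour.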
- 
- title_html = """ OCR Multilingual (GOT-OCR 2.0) """
- 
- with gr.Blocks() as demo:
-     gr.HTML(title_html)
-     gr.Markdown("""
- by Souvik Biswas
- 
- ### Guidelines
- Upload your image below and select your preferred mode. Note that more characters may increase wait times.
- - **Plain Texts OCR & Format Texts OCR:** Use these modes for basic image-level OCR.
- - **Plain Multi-Crop OCR & Format Multi-Crop OCR:** Ideal for images with complex content, offering higher-quality results.
- - **Plain Fine-Grained OCR & Format Fine-Grained OCR:** These modes let you specify fine-grained regions of the image for more flexible OCR. Regions can be defined by coordinates or by color (red, blue, green, black, or white).
- 
- """)
- 
-     with gr.Row():
-         with gr.Column():
-             image_input = gr.Image(type="filepath", label="upload your image")
-             task_dropdown = gr.Dropdown(
-                 choices=[
-                     "plain texts OCR",
-                     "format texts OCR",
-                     "plain multi-crop OCR",
-                     "format multi-crop OCR",
-                     "plain fine-grained OCR",
-                     "format fine-grained OCR",
-                     "English + Hindi(Qwen2-VL)"
-                 ],
-                 label="Choose one mode of GOT",
-                 value="plain texts OCR"
-             )
-             fine_grained_dropdown = gr.Dropdown(
-                 choices=["box", "color"],
-                 label="fine-grained type",
-                 visible=False
-             )
-             color_dropdown = gr.Dropdown(
-                 choices=["red", "green", "blue", "black", "white"],
-                 label="color list",
-                 visible=False
-             )
-             box_input = gr.Textbox(
-                 label="input box: [x1,y1,x2,y2]",
-                 placeholder="e.g., [0,0,100,100]",
-                 visible=False
-             )
-             submit_button = gr.Button("Submit")
- 
-         with gr.Column():
-             ocr_result = gr.Textbox(label="GOT output")
-             # Create the keyword-search interface, rendered inline in the Blocks layout.
-             iface = gr.Interface(
-                 fn=search_in_text,
-                 inputs=[
-                     ocr_result,
-                     gr.Textbox(label="Keywords",
-                                placeholder="search keyword e.g., The",
-                                visible=True)],
-                 outputs=gr.HTML(label="Search Results"),
-                 allow_flagging="never"
-             )
-         with gr.Column():
-             # Note: this block executes once while the UI is being built, when
-             # ocr_result.value is still empty, so no JSON file is written here.
-             if ocr_result.value:
-                 with open("ocr_result.json", "w") as json_file:
-                     json.dump({"text": ocr_result.value}, json_file)
- 
-         with gr.Column():
-             gr.Markdown("**If you choose a format mode, the mathpix result will be rendered automatically below:**")
-             html_result = gr.HTML(label="rendered html", show_label=True)
- 
-     task_dropdown.change(
-         task_update,
-         inputs=[task_dropdown],
-         outputs=[fine_grained_dropdown, color_dropdown, box_input]
-     )
-     fine_grained_dropdown.change(
-         fine_grained_update,
-         inputs=[fine_grained_dropdown],
-         outputs=[color_dropdown, box_input]
-     )
- 
-     submit_button.click(
-         run_GOT,
-         inputs=[image_input, task_dropdown, fine_grained_dropdown, color_dropdown, box_input],
-         outputs=[ocr_result, html_result]
-     )
- 
- if __name__ == "__main__":
-     cleanup_old_files()
-     demo.launch()
- 
- #!gradio deploy