import gradio as gr
from transformers import AutoModel, AutoTokenizer
from PIL import Image
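
# Load the GOT-OCR2_0 tokenizer and model. The checkpoint ships its own modelling code
# (hence trust_remote_code=True), and device_map='cuda' assumes a CUDA GPU is available.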
tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
model = AutoModel.from_pretrained(
    'ucaslcl/GOT-OCR2_0',
    trust_remote_code=True,
    low_cpu_mem_usage=True,
    device_map='cuda',
    use_safetensors=True,
    pad_token_id=tokenizer.eos_token_id,
)
model = model.eval().cuda()
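
# Run GOT-OCR2_0 on a PIL image and return the extracted plain text.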
def perform_ocr(image):
    # The model expects an RGB image, so convert other modes (e.g. RGBA, grayscale).
    if image.mode != "RGB":
        image = image.convert("RGB")

    # model.chat() takes a file path rather than a PIL object, so save the upload to disk first.
    image_file_path = 'temp_image.jpg'
    image.save(image_file_path)

    # ocr_type='ocr' performs plain-text extraction.
    res = model.chat(tokenizer, image_file_path, ocr_type='ocr')
    return res
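
# Report whether the keyword occurs in the extracted text (case-insensitive).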
def search_keyword(extracted_text, keyword):
    if not keyword.strip():
        return "Please enter a keyword."

    if keyword.lower() in extracted_text.lower():
        return f"Keyword '{keyword}' found in the extracted text!"
    else:
        return f"Keyword '{keyword}' not found in the extracted text."
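
# Gradio callback: run OCR on the uploaded image, then search the result for the keyword.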
def ocr_and_search(image, keyword):
    if image is None:
        return "", "Please upload an image."

    extracted_text = perform_ocr(image)
    search_result = search_keyword(extracted_text, keyword)
    return extracted_text, search_result
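
# Wire everything into a simple Gradio UI: image and keyword in, extracted text and search result out.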
interface = gr.Interface(
    fn=ocr_and_search,
    inputs=[
        gr.Image(type="pil", label="Upload Image"),
        gr.Textbox(label="Enter Keyword to Search"),
    ],
    outputs=[
        gr.Textbox(label="Extracted Text"),
        gr.Textbox(label="Search Result"),
    ],
    title="OCR and Document Search Web Application",
    description="Upload an image to extract text using the GOT-OCR2_0 model and search for a keyword within the extracted text.",
)

interface.launch()