# Basic_OCR_Model / app.py
import torch
from transformers import AutoProcessor, AutoModelForVision2Seq
from PIL import Image
import requests
import matplotlib.pyplot as plt
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load processor and model
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")
model = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen2-VL-7B-Instruct").to(device)
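# Note: loading the full 7B checkpoint in float32 needs a large amount of
# memory. A minimal sketch of a lower-memory alternative (assumes a CUDA GPU
# and the `accelerate` package; not part of the original script):
#
#   model = AutoModelForVision2Seq.from_pretrained(
#       "Qwen/Qwen2-VL-7B-Instruct",
#       torch_dtype=torch.float16,
#       device_map="auto",
#   )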
def perform_ocr(image_path: str):
    # Load image
    image = Image.open(image_path).convert("RGB")
    # Qwen2-VL expects a chat-style prompt containing an image placeholder and
    # a text instruction; the instruction below is a generic example prompt
    messages = [{"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Extract all text from this image."},
    ]}]
    prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    # Preprocess image and prompt
    inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(device)
    # Generate text
    with torch.no_grad():
        generated_ids = model.generate(**inputs, max_new_tokens=512)
    # Decode only the newly generated tokens, skipping the prompt
    extracted_text = processor.batch_decode(
        generated_ids[:, inputs.input_ids.shape[1]:], skip_special_tokens=True
    )[0]
    return extracted_text
# Example usage
if __name__ == "__main__":
    IMAGE_PATH = "Images/Hindi-to-English-sentences-translation.jpg"  # Replace with the path to your image

    # Perform OCR
    extracted_text = perform_ocr(IMAGE_PATH)

    # Display results
    print("Extracted Text:", extracted_text)

    # Show image
    img = Image.open(IMAGE_PATH)
    plt.imshow(img)
    plt.axis("off")
    plt.show()
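# A minimal sketch of batch usage (hypothetical `Images/` folder), reusing the
# perform_ocr function defined above; kept as a comment so it does not run on
# import:
#
#   from pathlib import Path
#   for path in sorted(Path("Images").glob("*.jpg")):
#       print(path.name, "->", perform_ocr(str(path)))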