import torch
from transformers import AutoProcessor, AutoModelForVision2Seq
from PIL import Image
import requests
import matplotlib.pyplot as plt
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load processor and model; move the model to the selected device so it matches
# the inputs prepared below
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")
model = AutoModelForVision2Seq.from_pretrained(
    "Qwen/Qwen2-VL-7B-Instruct", torch_dtype="auto"
).to(device)
def perform_ocr(image_path: str) -> str:
    # Load image
    image = Image.open(image_path).convert("RGB")

    # Qwen2-VL is an instruction-tuned chat model: the processor expects a text
    # prompt (built via the chat template) alongside the image. The instruction
    # wording below can be adjusted to taste.
    messages = [{"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Extract all text from this image."},
    ]}]
    prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    # Preprocess image and prompt
    inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(device)

    # Generate text
    with torch.no_grad():
        generated_ids = model.generate(**inputs, max_new_tokens=512)

    # Decode only the newly generated tokens, skipping the prompt tokens
    new_tokens = generated_ids[:, inputs.input_ids.shape[1]:]
    extracted_text = processor.batch_decode(new_tokens, skip_special_tokens=True)[0]
    return extracted_text
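
# Optional helper (not part of the original script): a minimal sketch of how
# perform_ocr could be applied to every image in a folder. The directory
# layout and file extensions here are illustrative assumptions.
def perform_ocr_on_directory(directory: str) -> dict:
    from pathlib import Path
    results = {}
    for path in sorted(Path(directory).glob("*")):
        if path.suffix.lower() in {".jpg", ".jpeg", ".png"}:
            results[path.name] = perform_ocr(str(path))
    return results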
# Example usage
if __name__ == "__main__":
    IMAGE_PATH = "Images/Hindi-to-English-sentences-translation.jpg"  # Replace with the path to your image

    # Perform OCR
    extracted_text = perform_ocr(IMAGE_PATH)

    # Display results
    print("Extracted Text:", extracted_text)

    # Show the input image
    img = Image.open(IMAGE_PATH)
    plt.imshow(img)
    plt.axis("off")
    plt.show()