Kabilash10 committed on
Commit 7d41561
1 Parent(s): f0e12f0

Delete app.py

Files changed (1)
  1. app.py +0 -41
app.py DELETED
@@ -1,41 +0,0 @@
- import streamlit as st
- from PIL import Image
- import torch
- from transformers import AutoProcessor, AutoModelForCausalLM
- import io
-
- # Load the model and processor from Hugging Face
- model_id = "Qwen/Qwen2-VL-7B-Instruct"
- st.title("Qwen2-VL-7B OCR with Streamlit")
-
- # Load processor and model
- st.write("Loading the model, please wait...")
- processor = AutoProcessor.from_pretrained(model_id)
- model = AutoModelForCausalLM.from_pretrained(model_id)
-
- st.write("Model loaded successfully!")
-
- # Streamlit UI for image upload
- uploaded_image = st.file_uploader("Upload an image for OCR", type=["jpg", "jpeg", "png"])
-
- if uploaded_image is not None:
-     # Display the uploaded image
-     image = Image.open(uploaded_image)
-     st.image(image, caption="Uploaded Image", use_column_width=True)
-
-     # Process the image using the model
-     st.write("Processing the image...")
-
-     # Convert image to tensor for model input
-     inputs = processor(images=image, return_tensors="pt")
-
-     # Run inference
-     with torch.no_grad():
-         generated_text = model.generate(**inputs, max_new_tokens=200)
-
-     # Decode the generated text
-     result_text = processor.decode(generated_text[0], skip_special_tokens=True)
-
-     # Display the result
-     st.write("Extracted Text:")
-     st.text(result_text)
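
For reference, the Qwen/Qwen2-VL-7B-Instruct model card loads the checkpoint with Qwen2VLForConditionalGeneration and builds the prompt through the processor's chat template, rather than loading it with AutoModelForCausalLM and calling the processor on an image alone as the deleted script does. The following is a minimal sketch of that documented pattern, not the code from this repository; it assumes transformers >= 4.45 plus accelerate, and the image path and OCR prompt are placeholders.

import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

model_id = "Qwen/Qwen2-VL-7B-Instruct"
# device_map="auto" requires the accelerate package
model = Qwen2VLForConditionalGeneration.from_pretrained(model_id, torch_dtype="auto", device_map="auto")
processor = AutoProcessor.from_pretrained(model_id)

# Placeholder image; in the Streamlit app this would come from st.file_uploader
image = Image.open("example.png")

# Pair the image with an OCR-style instruction using the chat template
messages = [{"role": "user", "content": [
    {"type": "image"},
    {"type": "text", "text": "Read all the text in this image."},
]}]
prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(model.device)

with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=200)

# Strip the prompt tokens so only the generated answer is decoded
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)]
print(processor.batch_decode(trimmed, skip_special_tokens=True)[0])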