|
import streamlit as st |
|
from PIL import Image |
|
import os |
|
import base64 |
|
from helper import ( |
|
custom_file_uploader, resize_image, convert_image_to_base64, post_request_and_parse_response, |
|
draw_bounding_boxes_for_textract, extract_text_from_textract_blocks, ChatGPTClient |
|
) |
|
import tempfile |
|
import shutil |
|
from pdf2image import convert_from_bytes |
|
|
|
|
|
# --- Configuration ----------------------------------------------------------
# Both variables are required; a missing key fails fast with a KeyError at
# startup instead of failing midway through a user session.
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]

TEXTRACT_API_URL = os.environ["TEXTRACT_API_URL"]

# BUG FIX: the original page title contained mojibake ("π¬ … π"), i.e. UTF-8
# emoji bytes decoded with the wrong codec. Restored to plausible emoji —
# confirm the intended glyphs against the project's README/branding.
st.set_page_config(page_title="💬 Chat with OCR 👀", layout="wide")
|
|
|
|
|
# Ensure the chat transcript exists exactly once per browser session;
# setdefault leaves an existing list untouched across reruns.
st.session_state.setdefault("messages", [])
|
|
|
|
|
with st.sidebar:
    # BUG FIX: restored mojibake emoji in the title ("πΌοΈ" was a
    # wrong-codec rendering of the framed-picture emoji).
    st.title("🖼️ Upload and Display Images")

    st.warning("Please upload an image or a single-page PDF file!")
    # BUG FIX: the uploader previously accepted type=['TXT', 'PDF'], which
    # contradicted the prompt above and would let Image.open() crash on a
    # .txt upload. Accept common image formats plus PDF instead.
    uploaded_file = st.file_uploader(
        "Upload an Image or PDF",
        type=["png", "jpg", "jpeg", "pdf"],
        label_visibility="collapsed",
    )

    # Defaults so code further down the script can safely test these names
    # even when nothing was uploaded or the PDF conversion failed.
    pil_image = None
    cleaned_up_body = None

    if uploaded_file:
        if uploaded_file.type == "application/pdf":
            try:
                # Rasterise the PDF at 200 dpi; only single-page documents
                # are accepted, so anything else leaves pil_image as None.
                pdf_bytes = uploaded_file.read()
                pages = convert_from_bytes(pdf_bytes, dpi=200)
                if len(pages) != 1:
                    st.warning("Please upload a PDF with only one page!")
                else:
                    pil_image = pages[0]
            except Exception as e:
                st.error(f"Failed to convert PDF to image: {e}")
        else:
            # Non-PDF uploads are treated as images and opened directly.
            pil_image = Image.open(uploaded_file)

    if pil_image:
        resized_image = resize_image(pil_image)

        with st.expander("Original Image", expanded=False):
            st.image(pil_image, caption="Uploaded Image", use_column_width=True)

        # Send the resized image to the Textract endpoint as base64 JSON.
        image_base64 = convert_image_to_base64(resized_image)
        payload = {"image": image_base64}
        result_dict = post_request_and_parse_response(TEXTRACT_API_URL, payload)

        # Overlay the detected bounding boxes on a copy so the resized
        # original stays untouched.
        image_with_boxes = draw_bounding_boxes_for_textract(resized_image.copy(), result_dict)

        with st.expander("Image with Bounding Boxes", expanded=True):
            st.image(image_with_boxes, caption="Image with Bounding Boxes", use_column_width=True)

        # Flatten the Textract block structure into plain text for the chat.
        cleaned_up_body = extract_text_from_textract_blocks(result_dict['body'])

        with st.expander("View JSON Body", expanded=False):
            st.json(result_dict)

        with st.expander("View Cleaned-up Text", expanded=False):
            st.text(cleaned_up_body)
|
|
|
|
|
# Visual spacer that pushes the "Clear Session" control toward the bottom
# of the sidebar.
st.sidebar.markdown("<br><br><br><br>", unsafe_allow_html=True)

# Wiping the transcript list is enough: the chat area re-renders empty on
# the rerun triggered by the button press.
if st.sidebar.button("Clear Session"):
    st.session_state["messages"] = []
|
|
|
|
|
st.title("Chat with OCR Output")

# Replay the stored transcript so the conversation survives Streamlit's
# rerun-on-interaction model.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])
|
|
|
|
|
# The chat client is only constructed once an image has been uploaded and
# successfully processed; it is re-seeded on every rerun from the stored
# transcript plus the extracted OCR text.
if uploaded_file and pil_image:
    seeded_history = list(st.session_state.messages)

    # Expose the OCR output to the model as a trailing system message.
    if cleaned_up_body:
        seeded_history.append({"role": "system", "content": cleaned_up_body})

    bot = ChatGPTClient(
        api_key=OPENAI_API_KEY,
        protocol="You are fed with the text portion of json file that come out of OCR after scanning an image. User will ask you questions about this json file.",
        body=cleaned_up_body
    )
    bot.history = seeded_history
|
|
|
|
|
# Handle one user turn: echo the prompt, persist it, then answer.
if prompt := st.chat_input("Ask me about the image"):

    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Query the model only when an image has been uploaded and parsed;
    # otherwise nudge the user to upload one first.
    response = (
        bot.generate_response(prompt)
        if uploaded_file and pil_image
        else "Please upload an image before asking questions."
    )

    st.chat_message("assistant").markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
|
|