import os

import requests
import streamlit as st
import PyPDF2

# Get the Hugging Face API token from the environment
HF_API_TOKEN = os.getenv("HF_API_KEY")
if not HF_API_TOKEN:
    raise ValueError("Hugging Face API token is not set in the environment variables.")

# Hugging Face Inference API URL and headers for the Gemma 2 27B-it model
GEMMA_27B_API_URL = "https://api-inference.huggingface.co/models/google/gemma-2-27b-it"
HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"}


def query_model(api_url, payload):
    """Send a JSON payload to the Inference API and return the parsed response."""
    response = requests.post(api_url, headers=HEADERS, json=payload)
    response.raise_for_status()
    return response.json()


def extract_pdf_text(uploaded_file):
    """Extract text from every page of the uploaded PDF."""
    pdf_reader = PyPDF2.PdfReader(uploaded_file)
    pdf_text = ""
    for page in pdf_reader.pages:
        pdf_text += page.extract_text() or ""
    return pdf_text


def add_message_to_conversation(user_message, bot_message, model_name):
    """Append a (user, bot, model) turn to the conversation kept in session state."""
    if "conversation" not in st.session_state:
        st.session_state.conversation = []
    st.session_state.conversation.append((user_message, bot_message, model_name))


# Streamlit app
st.set_page_config(page_title="Gemma 27B-it Chatbot Interface", layout="wide")
st.title("Gemma 27B-it Chatbot Interface")

# Initialize session state for the conversation and the uploaded file
if "conversation" not in st.session_state:
    st.session_state.conversation = []
if "uploaded_file" not in st.session_state:
    st.session_state.uploaded_file = None
if "pdf_text" not in st.session_state:
    st.session_state.pdf_text = ""

# File uploader for PDF
uploaded_file = st.file_uploader("Upload a PDF", type="pdf")

# Handle PDF upload and text extraction
if uploaded_file:
    st.session_state.uploaded_file = uploaded_file
    st.session_state.pdf_text = extract_pdf_text(uploaded_file)
    st.write("### PDF Text Extracted:")
    st.write(st.session_state.pdf_text)

# User input for the question
question = st.text_input("Question", placeholder="Enter your question here...")

# Handle user input and the Gemma 27B-it model response
if st.button("Send") and question:
    try:
        with st.spinner("Waiting for the model to respond..."):
            # Build the prompt from the last few turns, any extracted PDF text,
            # and the new question
            history = "\n".join(
                f"User: {user_msg}\nAssistant: {bot_msg}"
                for user_msg, bot_msg, _ in st.session_state.conversation[-5:]
            )
            prompt = ""
            if st.session_state.pdf_text:
                prompt += f"Document Text: {st.session_state.pdf_text}\n"
            if history:
                prompt += f"{history}\n"
            prompt += f"User: {question}\nAssistant:"

            response = query_model(GEMMA_27B_API_URL, {"inputs": prompt})
            if isinstance(response, list):
                answer = response[0].get("generated_text", "No response")
            elif isinstance(response, dict):
                answer = response.get("generated_text", "No response")
            else:
                answer = "No response"

            add_message_to_conversation(question, answer, "Gemma-2-27B-it")
    except (ValueError, requests.RequestException) as e:
        st.error(str(e))

# Custom CSS for chat bubbles (styles left empty here)
st.markdown(
    """
    """,
    unsafe_allow_html=True,
)

# Display the conversation
for user_message, bot_message, model_name in st.session_state.conversation:
    st.markdown(f"**You:** {user_message}")
    st.markdown(f"**{model_name}:** {bot_message}")
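# Usage note (a minimal sketch; the filename "app.py" is an assumption, not part of
# this script): export the Hugging Face token that os.getenv("HF_API_KEY") reads
# above, then launch the app with the Streamlit CLI, e.g.
#
#   export HF_API_KEY="hf_..."    # your Hugging Face access token
#   streamlit run app.py          # assumed filename for this script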