# GenAIEfrei / app.py
import gradio as gr
from huggingface_hub import InferenceClient
from pathlib import Path
from typing import List
from pdfplumber import open as open_pdf
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# Inference client for the hosted Zephyr chat model.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Load the PDF that serves as the retrieval corpus.
# TODO: replace this placeholder with the real PDF path before deploying.
pdf_path = Path("path/to/your/pdf/file.pdf")
with open_pdf(pdf_path) as pdf:
    # extract_text() returns None for pages with no extractable text
    # (e.g. scanned images); substitute "" so join() does not raise TypeError.
    text = "\n".join(page.extract_text() or "" for page in pdf.pages)

# Split the PDF text into fixed-size character chunks for naive
# substring-match retrieval in respond().
chunk_size = 1000  # characters per chunk; tune for the model's context budget
text_chunks: List[str] = [
    text[i:i + chunk_size] for i in range(0, len(text), chunk_size)
]
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat reply, grounding it in PDF chunks that match the query.

    Args:
        message: The user's latest message.
        history: Prior (user, assistant) turns from the ChatInterface.
        system_message: System prompt from the UI textbox.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Yields:
        The accumulated response text after each streamed token.
    """
    # Naive retrieval: keep chunks containing the query as a substring.
    relevant_chunks = [
        chunk for chunk in text_chunks if message.lower() in chunk.lower()
    ]

    messages = [{"role": "system", "content": system_message}]
    if relevant_chunks:
        # chat_completion() has no `files` parameter (the original call raised
        # TypeError); inject the retrieved context into the system prompt.
        messages[0]["content"] += "\n\nContext:\n" + "\n".join(relevant_chunks)

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    # Renamed the loop variable (was `message`, shadowing the parameter).
    for event in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        # The final stream event may carry delta.content == None; skip it
        # instead of crashing on `response += None`.
        token = event.choices[0].delta.content
        if token:
            response += token
        yield response
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
# UI wiring: the respond() generator plus user-tunable decoding controls,
# each built as a named widget before assembly for readability.
_system_box = gr.Textbox(
    value="You are a helpful car configuration assistant, specifically you are the assistant for Apex Customs (https://www.apexcustoms.com/). Given the user's input, provide suggestions for car models, colors, and customization options. Be conversational in your responses. You should remember the user car model and tailor your answers accordingly. You limit yourself to answering the given question and maybe propose a suggestion but not write the next question of the user. \n\nUser: ",
    label="System message",
)
_max_tokens_slider = gr.Slider(
    minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"
)
_temperature_slider = gr.Slider(
    minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"
)
_top_p_slider = gr.Slider(
    minimum=0.1,
    maximum=1.0,
    value=0.95,
    step=0.05,
    label="Top-p (nucleus sampling)",
)

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        _system_box,
        _max_tokens_slider,
        _temperature_slider,
        _top_p_slider,
    ],
)

if __name__ == "__main__":
    demo.launch()