import gradio as gr
import torch
from transformers import pipeline

# Initialize the Hugging Face text-generation pipeline with a more advanced model.
# Replace "EleutherAI/gpt-neo-2.7B" with other models such as "mosaicml/mpt-7b-chat"
# or "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5".
generation_pipeline = pipeline(
    "text-generation",
    model="EleutherAI/gpt-neo-2.7B",  # Replace this with the desired advanced model
    device=0 if torch.cuda.is_available() else -1  # Use GPU if available, otherwise fall back to CPU
)
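
# Note: gpt-neo-2.7B needs roughly 10 GB of memory in full precision; a smaller
# GPT-Neo checkpoint (e.g. "EleutherAI/gpt-neo-125m") can be swapped in for quick local testing.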

def dental_chatbot_response(message, history):
    """
    Responds to user queries with a focus on dental terminology.
    - Dynamically generates responses using an advanced LLM.
    - Designed to address dental-related questions or provide general responses.
    """
    print(f"User Input: {message}")
    print(f"Chat History: {history}")
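    # Note: `history` is currently only logged; it is not folded into the prompt,
    # so each reply is generated from the current message alone.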
    # Add a prompt to guide the LLM's focus on dental terminology
    prompt = (
        f"You are a highly knowledgeable and friendly dental expert chatbot. "
        f"Provide detailed and accurate explanations of dental terms, procedures, and treatments. "
        f"If the query is not dental-related, respond helpfully and informatively.\n\n"
        f"User: {message}\n\n"
        f"Chatbot:"
    )
    # Generate a response using the LLM
    generated = generation_pipeline(
        prompt,
        max_new_tokens=200,      # Limit only the newly generated tokens, not the prompt
        num_return_sequences=1,
        do_sample=True,
        top_p=0.9,               # Nucleus sampling for diverse responses
        top_k=50                 # Top-k sampling for quality control
    )
    # The pipeline returns the prompt followed by the completion, so keep only
    # the text after the first "Chatbot:" marker
    ai_response = generated[0]["generated_text"].split("Chatbot:", 1)[-1].strip()
    print(f"Dental Chatbot Response: {ai_response}")
    return ai_response

# Gradio ChatInterface
demo = gr.ChatInterface(
    fn=dental_chatbot_response,
    title="Advanced Dental Terminology Chatbot",
    description=(
        "Ask me anything about dental terms, procedures, and treatments! "
        "This chatbot is powered by an advanced LLM for detailed and accurate answers."
    )
)

if __name__ == "__main__":
    demo.launch()