# Hugging Face Spaces app: Mistral-7B-Instruct chat bot with typo correction.
from datetime import datetime  # NOTE(review): imported but unused in this chunk — confirm before removing

import gradio as gr
from huggingface_hub import InferenceClient
from textblob import TextBlob  # for typo correction

# Base inference endpoint.
# NOTE(review): unused — InferenceClient below is constructed from a model id, not this URL.
API_URL = "https://api-inference.huggingface.co/models/"

# Shared client for the hosted Mistral-7B instruct model.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")
def format_prompt(message, history):
    """Build a Mistral-instruct prompt from the chat history and new message.

    Args:
        message: The latest user message.
        history: Iterable of ``(user_prompt, bot_response)`` pairs.

    Returns:
        A single string in the ``<s>[INST] ... [/INST]`` format expected by
        Mistral-7B-Instruct.
    """
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
def correct_typos(text):
    """Return *text* with spelling typos corrected via TextBlob.

    Args:
        text: Arbitrary user-supplied string.

    Returns:
        The corrected string (TextBlob's best-effort spell correction).
    """
    return str(TextBlob(text).correct())
def generate(prompt, history):
    """Stream a chat response for *prompt*, yielding growing partial output.

    Args:
        prompt: Raw user message; typos are corrected before use.
        history: List of ``(user, bot)`` message pairs from the chat so far.

    Yields:
        The accumulated response text, token by token, so the UI updates live.
    """
    prompt = correct_typos(prompt)

    lowered = prompt.lower()
    # Canned answers for a couple of hard-coded questions.
    # BUG FIX: this function contains ``yield`` and is therefore a generator;
    # the original ``return "..."`` branches only set a StopIteration value and
    # produced NO visible chat output. The canned replies must be yielded.
    if "who created you" in lowered:
        yield "I was created by Aniket Kumar and many more."
        return
    if "how are you" in lowered:
        yield "I'm an AI and don't have feelings, but I'm here to help you. How can I assist you today?"
        return

    # Sampling parameters for the hosted text-generation endpoint.
    generate_kwargs = dict(
        temperature=0.9,
        max_new_tokens=512,
        top_p=0.95,
        repetition_penalty=1.0,
        do_sample=True,
    )

    formatted_prompt = format_prompt(prompt, history)

    # Stream tokens from the model and yield the accumulated text each step.
    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output
def create_interface():
    """Build and launch the Gradio chat UI around :func:`generate`."""
    # Enlarge the chat area. BUG FIX: CSS has no ``#`` comment syntax — the
    # original ``#``-style comments were invalid CSS; use ``/* ... */``.
    custom_css = """
    #component-7 { /* default element ID of the chat component */
        height: 800px; /* adjust the height as needed */
        flex-grow: 1;
    }
    """
    with gr.Blocks(css=custom_css) as demo:
        gr.ChatInterface(
            generate,
        )
    demo.queue().launch(debug=True)
# Run the application only when executed as a script (not on import).
if __name__ == "__main__":
    create_interface()