# chatbot_demo / app.py
import gradio as gr
gr.load("models/microsoft/Phi-3.5-mini-instruct", max_batch_size=1000).launch(share=True)
# def generate_response(user_input):
#     # First draft (incomplete): gr.load() was called here but its return value
#     # was never used; tokenizer/model come from the transformers draft below.
#     inputs = tokenizer(user_input, return_tensors="pt")
#     outputs = model.generate(**inputs, max_new_tokens=100)
#     return tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
# gradio_app = gr.Interface(
#     fn=generate_response,
#     inputs="text",
#     outputs="text",
#     max_batch_size=50,
#     title="Advertisement companion",
# )
# from transformers import AutoTokenizer, AutoModelForCausalLM
# import torch
# # Load the model and tokenizer
# tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
# model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
# # Define the role prompt for advertisement assistance
# # role_prompt = "You are an advertisement assistant. Respond professionally and helpfully to advertising-related questions.\n\n"
# # Function to generate responses
# def generate_response(user_input):
#     # Prepend role information to user input
#     # input_text = user_input
#     # Tokenize and generate a response
#     inputs = tokenizer(user_input, return_tensors="pt")
#     outputs = model.generate(
#         **inputs,
#         max_new_tokens=100,  # Increase this if you want longer responses
#         # Sampling parameters (e.g. temperature/top_p for nucleus sampling) could go here
#     )
#     # Decode and return the first decoded sequence as plain text
#     response = tokenizer.batch_decode(outputs, skip_special_tokens=True)
#     return response[0]
# # Set up Gradio interface
# interface = gr.Interface(
#     fn=generate_response,
#     inputs="text",
#     outputs="text",
#     title="Advertisement Assistant Chatbot",
#     description="Ask me anything related to advertising. I'm here to help!"
# )
# # Launch the Gradio app with sharing enabled
# interface.launch(share=True)
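# Hedged aside (an assumption, not part of any committed draft): Phi-3.5-mini-
# instruct is a chat-tuned model, so the transformers draft above would follow
# the model card more closely by using the tokenizer's chat template:
# messages = [{"role": "user", "content": user_input}]
# input_ids = tokenizer.apply_chat_template(
#     messages, add_generation_prompt=True, return_tensors="pt"
# )
# outputs = model.generate(input_ids, max_new_tokens=100)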
# import gradio as gr
# from transformers import pipeline
# # Load the model pipeline for text generation
# generator = pipeline("text-generation", model="microsoft/Phi-3.5-mini-instruct")
# # Define the role prompt for advertisement assistance
# role_prompt = "You are an advertisement assistant. Respond professionally and helpfully to advertising-related questions.\n\n"
# # Function to generate responses
# def generate_response(user_input):
#     input_text = role_prompt + user_input
#     response = generator(input_text, max_new_tokens=50, temperature=0.7, top_p=0.9)
#     return response[0]["generated_text"]
# # Set up Gradio interface
# interface = gr.Interface(
#     fn=generate_response,
#     inputs="text",
#     outputs="text",
#     title="Advertisement Assistant Chatbot",
#     description="Ask me anything related to advertising. I'm here to help!"
# )
# # Launch the Gradio app with sharing enabled
# interface.launch(share=True)
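# Hedged aside (assumes a recent transformers release with chat-format support
# in text-generation pipelines): the role prompt above could instead be passed
# as a system message, which keeps it out of the returned text:
# messages = [
#     {"role": "system", "content": "You are an advertisement assistant."},
#     {"role": "user", "content": user_input},
# ]
# result = generator(messages, max_new_tokens=50)
# reply = result[0]["generated_text"][-1]["content"]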
# import gradio as gr
# # Load the model using gr.load()
# model_interface = gr.load("models/microsoft/Phi-3.5-mini-instruct")
# # Create a wrapper interface to customize the appearance
# interface = gr.Interface(
#     fn=model_interface,
#     inputs="text",
#     outputs="text",
#     title="Advertisement Assistant Chatbot",
#     description="Ask me anything related to advertising. I'm here to help! This assistant provides professional guidance on advertising queries.",
#     theme="default",  # Optional: choose a theme or style
# )
# # Launch with sharing enabled
# interface.launch(share=True)
# import gradio as gr
# from transformers import pipeline
# Run `huggingface-cli login` in a terminal first (Llama models are gated)
# text_generator = pipeline("text-generation", model="meta-llama/Llama-3.2-1B")
# def predict(input_text):
#     predictions = text_generator(input_text, max_new_tokens=50, num_return_sequences=1)
#     return predictions[0]["generated_text"]
# gradio_app = gr.Interface(
#     predict,
#     inputs=gr.Textbox(label="Enter text for generation"),
#     outputs=gr.Textbox(label="Generated Text"),
#     title="Text Generation Model",
#     description="This app generates text based on input prompts."
# )
# if __name__ == "__main__":
#     gradio_app.launch()