Spaces:
Sleeping
Sleeping
File size: 3,005 Bytes
12c7fb3 17b34a9 12c7fb3 07eb4b6 12c7fb3 07eb4b6 12c7fb3 750fee8 5a4aaa7 12c7fb3 17b34a9 12c7fb3 07eb4b6 17b34a9 5a4aaa7 17b34a9 5a4aaa7 6c5d3cb 07eb4b6 5a4aaa7 6c5d3cb 12c7fb3 2b922e6 6c5d3cb 17b34a9 2b922e6 6c5d3cb 2b922e6 6c5d3cb 5a4aaa7 2b922e6 5a4aaa7 2b922e6 6c5d3cb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 |
import os
import time

import gradio as gr
import google.generativeai as genai
from dotenv import load_dotenv
# Load environment variables from a local .env file (expects GEMINI_API_KEY).
load_dotenv()

# Retrieve the API key from the environment rather than hard-coding it:
# a key committed in source is effectively public and must be rotated.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    raise RuntimeError(
        "GEMINI_API_KEY is not set; add it to the environment or a .env file."
    )

# Configure the Google Gemini client with the retrieved key.
genai.configure(api_key=GEMINI_API_KEY)
# Create the model configuration
generation_config = {
"temperature": 0.7,
"top_p": 0.95,
"top_k": 64,
"max_output_tokens": 512, # Adjust as needed
"response_mime_type": "text/plain",
}
# Simplified safety settings (or try removing them to test)
safety_settings = [
{"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
{"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"}
]
def generate_response(user_input, chat_history):
    """Generate a Gemini reply for *user_input* given the running chat history.

    Parameters:
        user_input: the user's latest message (str).
        chat_history: list of (role, text) tuples accumulated so far.

    Returns:
        A new list of (role, text) tuples — the bounded history with the
        user turn and the assistant's reply (or an error message) appended.
    """
    # Copy before appending so the caller's list (a shared Gradio State)
    # is never mutated in place; the original appended first and only then
    # rebound the slice, leaking user turns into the shared state.
    history = list(chat_history)
    history.append(("user", user_input))
    # Keep only the last 10 turns to bound prompt size.
    history = history[-10:]

    # NOTE(review): gr.Chatbot expects (user_msg, bot_msg) pairs; these
    # (role, text) tuples will render the role string as the left-hand
    # message — confirm this is the intended display format.
    model = genai.GenerativeModel(
        model_name="gemini-1.5-pro",
        generation_config=generation_config,
        safety_settings=safety_settings,
        system_instruction="You are Shadow the Hedgehog and you must act like Shadow the Hedgehog's personality.",
    )

    retry_attempts = 3
    for attempt in range(retry_attempts):
        try:
            # Fresh session per attempt; the whole history is sent as one
            # role-prefixed transcript rather than via per-turn messages.
            chat_session = model.start_chat()
            formatted_history = "\n".join(f"{role}: {msg}" for role, msg in history)
            response = chat_session.send_message(formatted_history)
            history.append(("assistant", response.text))
            return history
        except Exception as e:
            if attempt == retry_attempts - 1:
                # Out of retries: surface the failure in the chat itself.
                history.append(
                    ("assistant", f"Error after {retry_attempts} attempts: {str(e)}")
                )
                return history
            # Exponential backoff before retrying; the original retried
            # immediately, which hammers the API on transient errors.
            time.sleep(2 ** attempt)
# Build the Gradio interface: a chatbot display, a text input, and a send
# button, all wired to generate_response.
with gr.Blocks() as iface:
    chatbot = gr.Chatbot()  # Conversation display component
    user_input = gr.Textbox(
        label="Talk to AI", placeholder="Enter your message here...", lines=2
    )
    submit_button = gr.Button("Send")  # Explicit send button
    # NOTE(review): this State is an input only — it is never listed in
    # `outputs`, so the stored history is never written back and stays []
    # across turns; confirm whether history persistence is intended.
    chat_history_state = gr.State([])

    def clear_input():
        """Return an empty string to blank the input textbox."""
        return ""

    # Button click and textbox Enter now behave identically: generate a
    # reply, then clear the input box. The original cleared the box only
    # on Enter, leaving typed text behind after a button click.
    submit_button.click(
        fn=generate_response,
        inputs=[user_input, chat_history_state],
        outputs=chatbot,
    ).then(clear_input, outputs=[user_input])

    user_input.submit(
        fn=generate_response,
        inputs=[user_input, chat_history_state],
        outputs=chatbot,
    ).then(clear_input, outputs=[user_input])

iface.launch()
|