# Shadow the Hedgehog chatbot — a Gradio app backed by the Google Gemini API.
import os
import gradio as gr
import google.generativeai as genai
from dotenv import load_dotenv
import time
# Load environment variables from a local .env file (supplies GEMINI_API_KEY).
load_dotenv()

# Read the API key from the environment instead of hard-coding it: committing a
# live key to source control leaks it to anyone who can read the repository.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    raise RuntimeError(
        "GEMINI_API_KEY is not set; add it to your environment or .env file."
    )

# Configure the Google Gemini client once at import time.
genai.configure(api_key=GEMINI_API_KEY)

# Decoding/sampling parameters shared by every generation request.
generation_config = {
    "temperature": 0.7,
    "top_p": 0.95,
    "top_k": 64,
    "max_output_tokens": 512,  # Adjust as needed
    "response_mime_type": "text/plain",
}

# Relaxed safety settings for the listed categories; other categories keep
# the API's defaults.
safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
]
def generate_response(user_input, chat_history):
    """Generate a persona-styled reply to *user_input*, with retries.

    Args:
        user_input: The user's new message (str).
        chat_history: List of prior messages; mutated/rebound here and
            returned so the Gradio State component stays in sync.

    Returns:
        A ``(reply_text, updated_history)`` tuple. On repeated API failure
        the first element is an error string instead of a model reply.
    """
    # Persona prompt sent as the system instruction on every request.
    updated_system_content = "You are Shadow the Hedgehog and you must act like Shadow the Hedgehog's personality."

    model = genai.GenerativeModel(
        model_name="gemini-1.5-pro",
        generation_config=generation_config,
        safety_settings=safety_settings,
        system_instruction=updated_system_content,
    )

    # Record the new user message and keep only the most recent messages so
    # the prompt stays bounded.
    chat_history.append(user_input)
    chat_history = chat_history[-10:]

    retry_attempts = 3
    for attempt in range(retry_attempts):
        try:
            # Fresh session each attempt; the whole history is sent as one
            # newline-joined message.
            chat_session = model.start_chat()
            response = chat_session.send_message("\n".join(chat_history))
            # Bug fix: also record the model's reply, so subsequent turns
            # include both sides of the conversation (previously only user
            # messages were kept and the model never saw its own answers).
            chat_history.append(response.text)
            chat_history = chat_history[-10:]
            return response.text, chat_history
        except Exception as e:
            if attempt < retry_attempts - 1:
                time.sleep(2)  # Brief delay before retrying
                continue
            else:
                return f"Error after {retry_attempts} attempts: {str(e)}", chat_history
# Assemble the Gradio interface: one text input, hidden history state,
# one text output, and a button that triggers generation.
with gr.Blocks(theme="Hev832/Applio") as iface:
    gr.Markdown("Duplicate this space in case there is an error or something with your own gemini api key!")

    user_box = gr.Textbox(lines=2, label="Talk to AI", placeholder="Enter your message here...")
    history_state = gr.State([])  # Persists the conversation across clicks
    reply_box = gr.Textbox(label="Response")

    submit_btn = gr.Button("Generate Response")
    submit_btn.click(
        fn=generate_response,
        inputs=[user_box, history_state],
        outputs=[reply_box, history_state],
    )

iface.launch()