Hev832 committed on
Commit
f2f0a43
1 Parent(s): b72347a

Update app.py

Files changed (1)
  1. app.py +28 -35
app.py CHANGED
@@ -1,12 +1,14 @@
+import os
 import gradio as gr
 import google.generativeai as genai
 from dotenv import load_dotenv
+import time
 
 # Load environment variables from .env file
 load_dotenv()
 
 # Retrieve API key from environment variable
-GEMINI_API_KEY = "AIzaSyA0SnGcdEuesDusLiM93N68-vaFF14RCYg" # public API
+GEMINI_API_KEY = "AIzaSyA0SnGcdEuesDusLiM93N68-vaFF14RCYg" # public api
 
 # Configure Google Gemini API
 genai.configure(api_key=GEMINI_API_KEY)
@@ -26,66 +28,57 @@ safety_settings = [
     {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"}
 ]
 
+# Function to generate a response based on user input and chat history
 def generate_response(user_input, chat_history):
     """Generates a response based on user input and chat history."""
-
-    # Add user input to history as a tuple
-    chat_history.append(("user", user_input))
 
-    # Limit history length to the last 10 messages
-    chat_history = chat_history[-10:]
+    # Update system content with the full character description
+    updated_system_content = "You are Shadow the Hedgehog and you must act like Shadow the Hedgehog's personality."
 
     # Create the generative model
     model = genai.GenerativeModel(
         model_name="gemini-1.5-pro",
         generation_config=generation_config,
         safety_settings=safety_settings,
-        system_instruction="You are Shadow the Hedgehog and you must act like Shadow the Hedgehog's personality.",
+        system_instruction=updated_system_content,
     )
 
+    # Add user input to history
+    chat_history.append(user_input)
+
+    # Limit history length to the last 10 messages
+    chat_history = chat_history[-10:]
+
     retry_attempts = 3
     for attempt in range(retry_attempts):
        try:
            # Start a new chat session
            chat_session = model.start_chat()
 
-           # Format the history for the model
-           formatted_history = "\n".join([f"{role}: {msg}" for role, msg in chat_history])
-           response = chat_session.send_message(formatted_history)
-
-           # Append the assistant's response to history as a tuple
-           chat_history.append(("assistant", response.text))
-           return chat_history
+           # Send the entire chat history as the first message
+           response = chat_session.send_message("\n".join(chat_history))
+           return response.text, chat_history
 
        except Exception as e:
            if attempt < retry_attempts - 1:
+               time.sleep(2) # Delay before retrying
               continue
           else:
-               chat_history.append(("assistant", f"Error after {retry_attempts} attempts: {str(e)}"))
-               return chat_history
-
-# Build the Gradio interface using Chatbot and Button
-with gr.Blocks() as iface:
-    chatbot = gr.Chatbot() # Create a Chatbot component
-    user_input = gr.Textbox(label="Talk to AI", placeholder="Enter your message here...", lines=2)
-    submit_button = gr.Button("Send") # Create a button to submit messages
+               return f"Error after {retry_attempts} attempts: {str(e)}", chat_history
+
+# Build the Gradio interface
+with gr.Blocks(theme="Hev832/Applio") as iface:
+    chat_input = gr.Textbox(lines=2, label="Talk to AI", placeholder="Enter your message here...")
     chat_history_state = gr.State([]) # State input for chat history
+    response_output = gr.Textbox(label="Response")
 
     # Define the layout and components
-    submit_button.click(
+    generate_button = gr.Button("Generate Response")
+    generate_button.click(
        fn=generate_response,
-       inputs=[user_input, chat_history_state],
-       outputs=chatbot
+       inputs=[chat_input, chat_history_state],
+       outputs=[response_output, chat_history_state]
    )
 
-    # Optional: Clear the input box after submission
-    def clear_input():
-        return ""
-
-    user_input.submit(
-        fn=generate_response,
-        inputs=[user_input, chat_history_state],
-        outputs=chatbot
-    ).then(clear_input, outputs=[user_input])
-
 iface.launch()
+
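
Both versions call load_dotenv() and keep the "Retrieve API key from environment variable" comment, yet the key itself is assigned as a literal string. Below is a minimal sketch of reading the key from the environment instead, assuming the .env file defines a GEMINI_API_KEY entry (the variable name is an assumption, not something the commit specifies):

import os
from dotenv import load_dotenv
import google.generativeai as genai

# Pull variables from a local .env file into the process environment
load_dotenv()

# Assumed variable name; fail fast if the key is missing instead of shipping a literal
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    raise RuntimeError("GEMINI_API_KEY is not set in the environment or .env file")

# Configure Google Gemini API with the key read from the environment
genai.configure(api_key=GEMINI_API_KEY)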
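
In the updated generate_response, chat_history only ever receives the user's messages, so the prompt built with "\n".join(chat_history) does not carry the model's earlier replies from turn to turn. The sketch below is one possible variant that also records the reply; generate_response_with_memory is a hypothetical name, and it assumes genai, generation_config, and safety_settings are already set up as in app.py:

def generate_response_with_memory(user_input, chat_history):
    """Variant sketch: keep both user and assistant turns in the joined history."""
    model = genai.GenerativeModel(
        model_name="gemini-1.5-pro",
        generation_config=generation_config,
        safety_settings=safety_settings,
        system_instruction="You are Shadow the Hedgehog and you must act like Shadow the Hedgehog's personality.",
    )
    chat_history.append(f"user: {user_input}")
    chat_history = chat_history[-10:]  # keep the last 10 entries, as in the commit
    chat_session = model.start_chat()
    response = chat_session.send_message("\n".join(chat_history))
    chat_history.append(f"assistant: {response.text}")
    return response.text, chat_history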
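
The new interface returns the updated history through gr.State by listing chat_history_state in outputs, so Gradio feeds the returned list back in on the next click. A self-contained sketch of that round-trip pattern with the Gemini call stubbed out (the echo function is a placeholder, not part of app.py):

import gradio as gr

def echo(user_input, chat_history):
    # Stand-in for generate_response: append the message, trim to 10 entries, return text plus history
    chat_history = (chat_history + [user_input])[-10:]
    return f"You said: {user_input}", chat_history

with gr.Blocks() as demo:
    chat_input = gr.Textbox(lines=2, label="Talk to AI")
    chat_history_state = gr.State([])  # value round-trips between clicks
    response_output = gr.Textbox(label="Response")
    generate_button = gr.Button("Generate Response")
    generate_button.click(
        fn=echo,
        inputs=[chat_input, chat_history_state],
        outputs=[response_output, chat_history_state],
    )

demo.launch()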