import gradio as gr
from openai import OpenAI
import os
import sqlite3
import base64
# Read API key from environment variable
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
if not OPENAI_API_KEY:
    raise ValueError("API key not found. Please set the OPENAI_API_KEY environment variable.")
client = OpenAI(api_key=OPENAI_API_KEY)
# Database setup
conn = sqlite3.connect('faqs.db')
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS faq (id INTEGER PRIMARY KEY, question TEXT, answer TEXT)''')
conn.commit()
global_system_prompt = None
global_model = 'gpt-4o'
def encode_image(image_path):
    """Read an image file and return its contents as a base64-encoded string."""
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')
def build_assistant(field, lang, name, model, description, rules):
    """Build the global system prompt and model choice from the assistant settings."""
    global global_system_prompt
    global global_model
    list_faqs = get_faqs()
    system_prompt = f'''You are a helpful chatbot in the field of {field} that helps customers and answers based on FAQs.
You must answer only in {lang}.
Your name is {name}.
'''
    if len(description) > 0:
        system_prompt += f'{description}.\n'
    if len(rules) > 0:
        system_prompt += f'You must follow these rules: {rules}\n'
    if len(list_faqs) > 0:
        # Give the model the stored FAQs, plus a fallback phone number for unanswered questions.
        system_prompt += ('If the customer asks a question, first check this list of FAQs for the answer. '
                          'If there is no answer, suggest that the customer call this phone number: 09999999999.\n\n'
                          f'FAQs:\n{list_faqs}\n')
    global_system_prompt = system_prompt
    global_model = model
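# For illustration only: building with field='AI', lang='English', name='AIBOT' and an empty
# description, rules and FAQ table yields a system prompt along these lines:
#
#   You are a helpful chatbot in the field of AI that helps customers and answers based on FAQs.
#   You must answer only in English.
#   Your name is AIBOT.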
def add_faq(question, answer):
    """Insert a question/answer pair into the FAQ table."""
    conn = sqlite3.connect('faqs.db')
    c = conn.cursor()
    c.execute('INSERT INTO faq (question, answer) VALUES (?, ?)', (question, answer))
    conn.commit()
    conn.close()
def get_faqs():
    """Return all stored FAQs as a single formatted string ('' when none exist)."""
    faq_list = ''
    conn = sqlite3.connect('faqs.db')
    c = conn.cursor()
    c.execute('SELECT question, answer FROM faq')
    faqs = c.fetchall()
    if len(faqs) > 0:
        faq_list = "\n\n".join([f"Q: {faq[0]}\nA: {faq[1]}" for faq in faqs])
    conn.close()
    return faq_list
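# For illustration only (the rows below are made up), get_faqs() returns a block like:
#
#   Q: What are your working hours?
#   A: 9:00 to 17:00, Saturday to Wednesday.
#
#   Q: Do you ship internationally?
#   A: Not yet.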
def send_message(user_message, chat_history):
    # Note: this helper is not wired to any Gradio event below; it only appends a
    # placeholder reply and is kept as-is.
    chat_history.append((f"User: {user_message}", 'Hi there'))
    return "", chat_history
def convert_history_to_openai_format(history):
    """
    Convert Gradio chat history to the OpenAI chat-completions message format.

    Parameters:
        history (list): Chat history where each entry is (user_message, assistant_message).
            The user part is either a plain string or a (file_path, text) tuple for uploads.

    Returns:
        list of dict: Messages with "role" set to "system", "user", or "assistant".
    """
    global global_system_prompt
    if global_system_prompt is None:
        global_system_prompt = "You are a helpful assistant."
    formatted_history = [{"role": "system", "content": global_system_prompt}]
    for user_msg, assistant_msg in history:
        # Uploaded images arrive as (file_path, text) pairs; send them as image_url content.
        if isinstance(user_msg, (tuple, list)) and (('.png' in user_msg[0]) or ('.jpg' in user_msg[0])):
            encoded_image = encode_image(user_msg[0])
            text = 'help me based on the image'
            if user_msg[1] != '':
                text = user_msg[1]
            content = [
                {'type': 'text', 'text': text},
                {'type': 'image_url', 'image_url': {'url': f'data:image/jpeg;base64,{encoded_image}'}},
            ]
            formatted_history.append({"role": 'user', "content": content})
        else:
            formatted_history.append({"role": 'user', "content": user_msg})
        if isinstance(assistant_msg, str):
            formatted_history.append({"role": 'assistant', "content": assistant_msg})
    return formatted_history
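# For illustration, a text-only history of [("hello", "hi, how can I help?")] converts to
# (the system prompt shown is the fallback used when no assistant has been built yet):
#
#   [{"role": "system", "content": "You are a helpful assistant."},
#    {"role": "user", "content": "hello"},
#    {"role": "assistant", "content": "hi, how can I help?"}]
#
# An image upload instead produces a "user" message whose content is a list holding a
# "text" part and an "image_url" part carrying the base64 data URL.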
def add_message(history, message):
    """Append the submitted text and/or uploaded files to the chat history."""
    if len(message["files"]) > 0:
        for x in message["files"]:
            history.append(((x, message["text"]), None))
    else:
        if message["text"] != '':
            history.append((message["text"], None))
    print(history)
    return history, gr.MultimodalTextbox(value=None, interactive=False)
def bot(history):
    """Send the current history to the OpenAI API and fill in the assistant's reply."""
    global global_model
    response = client.chat.completions.create(
        model=global_model,
        messages=convert_history_to_openai_format(history)
    )
    chatbot_message = response.choices[0].message.content.strip()
    history[-1][1] = chatbot_message
    return history
# Create Gradio interface
with gr.Blocks() as demo:
    # Assistant settings section
    warning_markdown = gr.Markdown(value="", visible=False)
    with gr.Row():
        with gr.Column(scale=1, min_width=200):
            gr.Markdown("### Assistant settings")
            field = gr.Textbox(label="Field", value='AI')
            lang = gr.Dropdown(label='Language', choices=['English', 'Persian'], value='English')
            name = gr.Textbox(label="Name", value='AIBOT')
            model = gr.Dropdown(label="Model", choices=['gpt-4o', 'gpt-4', 'gpt-3.5-turbo'], value='gpt-4o')
            description = gr.Textbox(label="Description", lines=3)
            rules = gr.Textbox(label="Rules", lines=3)
            build_button = gr.Button("Build")
        # Add FAQ section
        with gr.Column(scale=1, min_width=200):
            gr.Markdown("### Add FAQ")
            question = gr.Textbox(label="Question", lines=2)
            answer = gr.Textbox(label="Answer", lines=3)
            add_button = gr.Button("Add")
        # List of FAQs section
        with gr.Column(scale=1, min_width=200):
            gr.Markdown("### List of FAQs")
            faq_list = gr.Textbox(label="", interactive=False, lines=15, max_lines=15, placeholder="No FAQs available")
            refresh_button = gr.Button("Refresh")
    # Chatbot Playground section
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Chatbot Playground")
            chatbot = gr.Chatbot(label="Chatbot:", bubble_full_width=False, show_copy_button=True, min_width=400,
                                 avatar_images=(os.path.join(os.getcwd(), 'user.png'), os.path.join(os.getcwd(), 'ai.png')))
            chat_input = gr.MultimodalTextbox(interactive=True,
                                              placeholder="Enter message or upload file...", show_label=False)
    # Define button actions
    build_button.click(build_assistant, inputs=[field, lang, name, model, description, rules], outputs=[])
    add_button.click(add_faq, inputs=[question, answer], outputs=[])
    refresh_button.click(get_faqs, inputs=[], outputs=[faq_list])
    # Submitting the multimodal textbox appends the message, then asks the bot to reply,
    # then re-enables the (temporarily locked) input box.
    chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])
# Launch the demo
demo.launch()
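# To run locally (assuming this file is saved as app.py):
#   export OPENAI_API_KEY="sk-..."
#   python app.py
# Gradio serves the interface at http://127.0.0.1:7860 by default.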