# aiyogi_vedic / app.py
import openai
import gradio as gr
import os
# configure OpenAI
openai.api_key = os.environ["OPENAI_API_KEY"]
INSTRUCTIONS = "You are an experienced Vedic astrologer. Introduce yourself as AiYogi, an ai chatbot trained with the intellectual knowledge of a Vedic astrologer. Greet the user by their name. Let the user know your goal is to help them calculate their Vedic chart as well as provide a detailed summary " \
"Users will interact with you in order to learn more about their Vedic astrology chart" \
"Ask users for all the details needed in order for you to calculate their vedic chart" \
"Provide the user with their full Vedic Chart with no interpretaions, only data." \
"Ask user if they would like a brief summary interpretation" \
"Finally provide user with any recommendation based on their Vedic chart. Let the user know they are welcome to ask you more questions about their Vedic astrology chart " \
"Be polite and compassionate" \
"Limit your answers to no more than 500 words. "
TEMPERATURE = 0.5
MAX_TOKENS = 500
FREQUENCY_PENALTY = 0
PRESENCE_PENALTY = 0.6
# limits how many questions we include in the prompt
MAX_CONTEXT_QUESTIONS = 10
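# Illustration only (kept as a comment, not executed): with MAX_CONTEXT_QUESTIONS = 10,
# only the most recent ten (question, answer) pairs from the stored history are sent
# to the model on each request. The names below are hypothetical:
#
#   history = [(f"q{i}", f"a{i}") for i in range(12)]
#   window = history[-MAX_CONTEXT_QUESTIONS:]   # keeps q2..a11, drops q0..a1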
def get_response(instructions, previous_questions_and_answers, new_question):
    """Get a response from ChatCompletion.

    Args:
        instructions: The instructions for the chat bot - this determines how it will behave
        previous_questions_and_answers: Chat history as a list of (question, answer) tuples
        new_question: The new question to ask the bot

    Returns:
        The response text
    """
    # build the messages, starting with the system instructions
    messages = [
        {"role": "system", "content": instructions},
    ]
    # add the most recent previous questions and answers
    for question, answer in previous_questions_and_answers[-MAX_CONTEXT_QUESTIONS:]:
        messages.append({"role": "user", "content": question})
        messages.append({"role": "assistant", "content": answer})
    # add the new question
    messages.append({"role": "user", "content": new_question})
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=TEMPERATURE,
        max_tokens=MAX_TOKENS,
        top_p=1,
        frequency_penalty=FREQUENCY_PENALTY,
        presence_penalty=PRESENCE_PENALTY,
    )
    return completion.choices[0].message.content
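# Hypothetical usage sketch (kept as a comment so it does not run on import; it
# assumes a valid OPENAI_API_KEY and the pre-1.0 `openai` client used above):
#
#   history = [("My name is Asha.", "Namaste Asha! I am AiYogi...")]
#   reply = get_response(INSTRUCTIONS, history, "I was born on 12 May 1990 in Pune at 6:30 am.")
#   print(reply)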
def get_moderation(question):
    """Check the question is safe to ask the model.

    Parameters:
        question (str): The question to check

    Returns a list of error messages if the question is not safe, otherwise returns None
    """
    errors = {
        "hate": "Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.",
        "hate/threatening": "Hateful content that also includes violence or serious harm towards the targeted group.",
        "self-harm": "Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.",
        "sexual": "Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).",
        "sexual/minors": "Sexual content that includes an individual who is under 18 years old.",
        "violence": "Content that promotes or glorifies violence or celebrates the suffering or humiliation of others.",
        "violence/graphic": "Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.",
    }
    response = openai.Moderation.create(input=question)
    if response.results[0].flagged:
        # collect the descriptions of the categories that were flagged
        return [
            error
            for category, error in errors.items()
            if response.results[0].categories[category]
        ]
    return None
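# Hypothetical usage sketch (comment only; the Moderation endpoint uses the same
# OPENAI_API_KEY configured above):
#
#   errors = get_moderation("When will Saturn transit my moon sign?")
#   if errors:
#       print("\n".join(errors))   # one message per flagged category
#   else:
#       pass                       # safe to forward the question to get_response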
# Earlier command-line version, kept commented out for reference (Fore/Style come
# from colorama, which is not imported here):
#
# def main():
#     os.system("cls" if os.name == "nt" else "clear")
#     # keep track of previous questions and answers
#     previous_questions_and_answers = []
#     while True:
#         # ask the user for their question
#         new_question = input(
#             Fore.GREEN + Style.BRIGHT + "wwww?: " + Style.RESET_ALL
#         )
#         # check the question is safe
#         errors = get_moderation(new_question)
#         if errors:
#             print(
#                 Fore.RED
#                 + Style.BRIGHT
#                 + "Sorry, your question didn't pass the moderation check:"
#             )
#             for error in errors:
#                 print(error)
#             print(Style.RESET_ALL)
#             continue
#         response = get_response(INSTRUCTIONS, previous_questions_and_answers, new_question)
#         # add the new question and answer to the list of previous questions and answers
#         previous_questions_and_answers.append((new_question, response))
def delete_chat_history(previous_questions_and_answers):
    """Clear the stored chat history and return the emptied history for both the chatbot and the state."""
    previous_questions_and_answers = previous_questions_and_answers or []
    previous_questions_and_answers.clear()
    return previous_questions_and_answers, previous_questions_and_answers
def chatgpt_clone(input, previous_questions_and_answers):
    """Gradio callback: moderate the new message, query the model, and update the chat history."""
    previous_questions_and_answers = previous_questions_and_answers or []
    # check the question is safe to send to the model
    moderation_errors = get_moderation(input)
    if moderation_errors is not None:
        # surface the moderation message in the chat window instead of calling the model
        refusal = "Sorry, your question didn't pass the moderation check:\n" + "\n".join(moderation_errors)
        previous_questions_and_answers.append((input, refusal))
        return previous_questions_and_answers, previous_questions_and_answers
    # the previous exchanges are passed separately, so only the new question is sent here
    output = get_response(INSTRUCTIONS, previous_questions_and_answers, input)
    previous_questions_and_answers.append((input, output))
    return previous_questions_and_answers, previous_questions_and_answers
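# Comment-only sketch of the Gradio wiring below: on the first click `state` is
# None, so the history starts empty; afterwards Gradio feeds the returned list
# back in as `state` and renders the same list of (user, bot) tuples in the Chatbot.
# The example inputs are hypothetical:
#
#   history, state_out = chatgpt_clone("Hi, I'm Asha", None)
#   history, state_out = chatgpt_clone("My birth time is 6:30 am", state_out)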
# build the Gradio UI
block = gr.Blocks(
    theme=gr.themes.Monochrome(secondary_hue="neutral").set(
        button_primary_background_fill="*primary_400",
        button_primary_background_fill_hover="*primary_300",
    )
)
with block:
    # gr.Markdown("""<h1><center>_/\_ AI YOGI _/\_ </center></h1>""")
    chatbot = gr.Chatbot(label='Ai Yogi:')
    message = gr.Textbox(label='Namaste! Please introduce yourself below and then click SEND', placeholder='')
    # message.change(fn=lambda value: gr.update(value=""))
    state = gr.State()
    submit = gr.Button("SEND")
    submit.click(chatgpt_clone, inputs=[message, state], outputs=[chatbot, state])
    clear = gr.Button("CLEAR")
    clear.click(delete_chat_history, inputs=[state], outputs=[chatbot, state])
    # the relabelling callbacks take no inputs, so their lambdas must accept no arguments
    clear.click(lambda: gr.update(value='', placeholder='', label='Namaste! Please introduce yourself below and then click SEND'), [], [message])
    submit.click(lambda: gr.update(value='', placeholder='', label='Please answer below and then click SEND'), [], [message])
    submit.click(lambda: gr.update(label='Ai Yogi:'), [], [chatbot])
    clear.click(lambda: gr.update(label='Ai Yogi:'), [], [chatbot])
    message.submit(lambda: gr.update(value='', placeholder='', label=''), [], [message])
block.launch(show_api=False)
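# For local testing outside of Hugging Face Spaces, a temporary public link can be
# requested instead, e.g. block.launch(show_api=False, share=True).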