import openai
import gradio as gr
import os
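# NOTE: this app uses the pre-1.0 openai Python SDK (openai.ChatCompletion / openai.Moderation)
# and expects OPENAI_API_KEY to be set in the environment.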
# configure OpenAI
openai.api_key = os.environ["OPENAI_API_KEY"]
INSTRUCTIONS = "You are an experienced Vedic astrologer. Introduce yourself as AiYogi, an ai chatbot trained with the intellectual knowledge of a Vedic astrologer. Greet the user by their name. Let the user know your goal is to help them calculate their Vedic chart as well as provide a detailed summary " \
"Users will interact with you in order to learn more about their Vedic astrology chart" \
"Ask user for all the details needed in order for you to calculate their vedic chart. If time of birth is required in military format, please ask user to provide in such format" \
"Provide the user with basic details of their Vedic Chart such as the position of the planets in the 12 houses but with no interpretaions, just data" \
"Ask user if they would like a brief summary interpretation. Wait for user to answer! " \
"If user says yes, proceed to provide a brief interpretation along with any recommendations based on their Vedic chart. " \
"Let the user know they are welcome to ask you more questions about their Vedic astrology chart " \
"Be polite and compassionate" \
"Limit your answers to no more than 500 words. "
TEMPERATURE = 0.5
MAX_TOKENS = 500
FREQUENCY_PENALTY = 0
PRESENCE_PENALTY = 0.6
# limits how many questions we include in the prompt
MAX_CONTEXT_QUESTIONS = 10
def get_response(instructions, previous_questions_and_answers, new_question):
    """Get a response from ChatCompletion.

    Args:
        instructions: The instructions for the chat bot - this determines how it will behave
        previous_questions_and_answers: Chat history
        new_question: The new question to ask the bot

    Returns:
        The response text
    """
    # build the messages
    messages = [
        {"role": "system", "content": instructions},
    ]
    # add the previous questions and answers
    for question, answer in previous_questions_and_answers[-MAX_CONTEXT_QUESTIONS:]:
        messages.append({"role": "user", "content": question})
        messages.append({"role": "assistant", "content": answer})
    # add the new question
    messages.append({"role": "user", "content": new_question})

    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=TEMPERATURE,
        max_tokens=MAX_TOKENS,
        top_p=1,
        frequency_penalty=FREQUENCY_PENALTY,
        presence_penalty=PRESENCE_PENALTY,
    )
    return completion.choices[0].message.content
def get_moderation(question):
    """Check the question is safe to ask the model.

    Parameters:
        question (str): The question to check

    Returns a list of errors if the question is not safe, otherwise returns None.
    """
    errors = {
        "hate": "Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.",
        "hate/threatening": "Hateful content that also includes violence or serious harm towards the targeted group.",
        "self-harm": "Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.",
        "sexual": "Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).",
        "sexual/minors": "Sexual content that includes an individual who is under 18 years old.",
        "violence": "Content that promotes or glorifies violence or celebrates the suffering or humiliation of others.",
        "violence/graphic": "Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.",
    }
    response = openai.Moderation.create(input=question)
    if response.results[0].flagged:
        # get the categories that are flagged and generate a message
        result = [
            error
            for category, error in errors.items()
            if response.results[0].categories[category]
        ]
        return result
    return None
# def main():
#     os.system("cls" if os.name == "nt" else "clear")
#     # keep track of previous questions and answers
#     previous_questions_and_answers = []
#     while True:
#         # ask the user for their question
#         new_question = input(
#             Fore.GREEN + Style.BRIGHT + "wwww?: " + Style.RESET_ALL
#         )
#         # check the question is safe
#         errors = get_moderation(new_question)
#         if errors:
#             print(
#                 Fore.RED
#                 + Style.BRIGHT
#                 + "Sorry, your question didn't pass the moderation check:"
#             )
#             for error in errors:
#                 print(error)
#             print(Style.RESET_ALL)
#             continue
#         response = get_response(INSTRUCTIONS, previous_questions_and_answers, new_question)
#         # add the new question and answer to the list of previous questions and answers
#         previous_questions_and_answers.append((new_question, response))
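# NOTE: the commented-out CLI loop above relies on colorama's Fore/Style, which are not imported here;
# the Gradio callbacks below provide the same flow (moderation check, then response).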
def delete_chat_history(previous_questions_and_answers):
    # clear the stored history; the state is None before the first message
    previous_questions_and_answers = previous_questions_and_answers or []
    previous_questions_and_answers.clear()
    return previous_questions_and_answers, previous_questions_and_answers
def chatgpt_clone(input, previous_questions_and_answers):
    """Gradio handler: moderate the new message, get a reply, and update the chat history."""
    previous_questions_and_answers = previous_questions_and_answers or []
    # check the question is safe before sending it to the model
    moderation_errors = get_moderation(input)
    if moderation_errors is not None:
        output = "Sorry, your question didn't pass the moderation check:\n" + "\n".join(moderation_errors)
    else:
        # the chat history is already passed to get_response, so only the new question is sent here
        output = get_response(INSTRUCTIONS, previous_questions_and_answers, input)
    previous_questions_and_answers.append((input, output))
    return previous_questions_and_answers, previous_questions_and_answers
block = gr.Blocks(
    theme=gr.themes.Monochrome(secondary_hue="neutral").set(
        button_primary_background_fill="*primary_400",
        button_primary_background_fill_hover="*primary_300",
    ),
    css="footer {visibility: hidden}",
)

with block:
    # gr.Markdown("""<h1><center>_/\_ AI YOGI _/\_ </center></h1>""")
    chatbot = gr.Chatbot(label='Ai Yogi:')
    message = gr.Textbox(label='Namaste! Please introduce yourself below and then click SEND', placeholder='')
    # message.change(fn=lambda value: gr.update(value=""))
    state = gr.State()
    submit = gr.Button("SEND")
    submit.click(chatgpt_clone, inputs=[message, state], outputs=[chatbot, state])
    clear = gr.Button("CLEAR")
    clear.click(delete_chat_history, inputs=[state], outputs=[chatbot, state])
    # the reset callbacks below take no inputs, so the lambdas must accept no arguments
    clear.click(lambda: gr.update(value='', placeholder='', label='Namaste! Please introduce yourself below and then click SEND'), [], [message])
    submit.click(lambda: gr.update(value='', placeholder='', label='Please answer below and then click SEND'), [], [message])
    submit.click(lambda: gr.update(label='Ai Yogi:'), [], [chatbot])
    clear.click(lambda: gr.update(label='Ai Yogi:'), [], [chatbot])
    message.submit(lambda: gr.update(value='', placeholder='', label=''), [], [message])

block.launch(show_api=False)
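# To run this locally (a sketch; assumes the file is saved as app.py and that these pins
# match the pre-1.0 openai SDK and Gradio 3.x interfaces used above):
#   pip install "openai<1" "gradio<4"
#   export OPENAI_API_KEY=...
#   python app.py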