import openai
import gradio as gr
import os

# configure OpenAI
openai.api_key = os.environ["OPENAI_API_KEY"]

INSTRUCTIONS = "You are an experienced Ayurvedic practitioner. Introduce yourself as AiYogi, an AI chatbot trained with the knowledge of an Ayurvedic practitioner. Greet the user by their name. Let the user know your goal is to help them understand their dosha. " \
    "Users will interact with you in order to learn which type of dosha they are. " \
    "I want you to ask the user a series of 10 multiple-choice questions, one by one, in order for you to assess their dosha. " \
    "Please ask one question at a time and wait for the user to respond before you ask the next question. " \
    "Very important: do not provide an assessment until you have asked all 10 questions. " \
    "After the user has responded to all 10 questions, and before you provide your assessment, ask the user if there is any other information they would like to share with you. You will use their response as part of your assessment. " \
    "Finally, explain to the user which dosha they are by providing a brief summary along with diet, supplements and lifestyle choices they could benefit from. Let the user know they are welcome to ask you more questions about their dosha. " \
    "Be polite and compassionate, like a true Ayurvedic practitioner. " \
    "Limit your answers to no more than 200 words."

TEMPERATURE = 0.5
MAX_TOKENS = 500
FREQUENCY_PENALTY = 0
PRESENCE_PENALTY = 0.6
# limits how many questions we include in the prompt
MAX_CONTEXT_QUESTIONS = 10


def get_response(instructions, previous_questions_and_answers, new_question):
    """Get a response from ChatCompletion

    Args:
        instructions: The instructions for the chat bot - this determines how it will behave
        previous_questions_and_answers: Chat history
        new_question: The new question to ask the bot

    Returns:
        The response text
    """
    # build the messages, starting with the system instructions
    messages = [
        {"role": "system", "content": instructions},
    ]
    # add the previous questions and answers
    for question, answer in previous_questions_and_answers[-MAX_CONTEXT_QUESTIONS:]:
        messages.append({"role": "user", "content": question})
        messages.append({"role": "assistant", "content": answer})
    # add the new question
    messages.append({"role": "user", "content": new_question})

    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=TEMPERATURE,
        max_tokens=MAX_TOKENS,
        top_p=1,
        frequency_penalty=FREQUENCY_PENALTY,
        presence_penalty=PRESENCE_PENALTY,
    )
    return completion.choices[0].message.content


def get_moderation(question):
    """Check the question is safe to ask the model

    Parameters:
        question (str): The question to check

    Returns a list of errors if the question is not safe, otherwise returns None
    """
    errors = {
        "hate": "Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.",
        "hate/threatening": "Hateful content that also includes violence or serious harm towards the targeted group.",
        "self-harm": "Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.",
        "sexual": "Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).",
        "sexual/minors": "Sexual content that includes an individual who is under 18 years old.",
        "violence": "Content that promotes or glorifies violence or celebrates the suffering or humiliation of others.",
        "violence/graphic": "Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.",
    }
    response = openai.Moderation.create(input=question)
    if response.results[0].flagged:
        # get the categories that are flagged and generate a message
        result = [
            error
            for category, error in errors.items()
            if response.results[0].categories[category]
        ]
        return result
    return None


# def main():
#     os.system("cls" if os.name == "nt" else "clear")
#     # keep track of previous questions and answers
#     previous_questions_and_answers = []
#     while True:
#         # ask the user for their question
#         new_question = input(
#             Fore.GREEN + Style.BRIGHT + "wwww?: " + Style.RESET_ALL
#         )
#         # check the question is safe
#         errors = get_moderation(new_question)
#         if errors:
#             print(
#                 Fore.RED
#                 + Style.BRIGHT
#                 + "Sorry, your question didn't pass the moderation check:"
#             )
#             for error in errors:
#                 print(error)
#             print(Style.RESET_ALL)
#             continue
#         response = get_response(INSTRUCTIONS, previous_questions_and_answers, new_question)
#         # add the new question and answer to the list of previous questions and answers
#         previous_questions_and_answers.append((new_question, response))


def delete_chat_history(previous_questions_and_answers):
    previous_questions_and_answers.clear()
    return previous_questions_and_answers, ""


def chatgpt_clone(input, previous_questions_and_answers):
    previous_questions_and_answers = previous_questions_and_answers or []
    # check the new message is safe before sending it to the model;
    # if it is flagged, show the moderation errors in the chat instead of a reply
    moderation_errors = get_moderation(input)
    if moderation_errors is not None:
        previous_questions_and_answers.append((input, "\n".join(moderation_errors)))
        return previous_questions_and_answers, previous_questions_and_answers
    # get_response already includes the chat history in the messages,
    # so only the new message is passed as the question
    output = get_response(INSTRUCTIONS, previous_questions_and_answers, input)
    previous_questions_and_answers.append((input, output))
    return previous_questions_and_answers, previous_questions_and_answers


block = gr.Blocks(
    theme=gr.themes.Monochrome(secondary_hue="neutral").set(
        button_primary_background_fill="*primary_400",
        button_primary_background_fill_hover="*primary_300",
    ),
    css="footer {visibility: hidden}",
)

with block:
    # gr.Markdown("""