from openai import OpenAI
from datetime import datetime, timedelta
import gradio as gr
import os
import re
import requests
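
# Configuration comes from environment variables: OPENAI_API_KEY for the OpenAI client
# and DB_AUTH_TOKEN for the Airtable request in repo_get_all_employees_from_database().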
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
switch_to = "Switch to {model}"
switched_to = "Switched to: {model}"
gpt3_turbo = "gpt-3.5-turbo"
gpt4 = "gpt-4"
gpt4_turbo = "gpt-4-turbo-preview"
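
# Fetches interviewer records from Airtable and returns the "fields" dict of each record.
# Note: Airtable's REST API normally expects "Authorization: Bearer <token>", so DB_AUTH_TOKEN
# is assumed here to already include the "Bearer " prefix.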
def repo_get_all_employees_from_database():
    url = "https://api.airtable.com/v0/appopGmlHujYnd6Vw/Interviewers?maxRecords=100&view=Grid%20view"
    headers = {
        "Authorization": os.getenv("DB_AUTH_TOKEN")
    }
    response = requests.get(url, headers=headers)
    records = response.json()
    records_list = records['records']
    employees_list = []
    for record in records_list:
        employee = record["fields"]
        employees_list.append(employee)
    return employees_list
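
# Gradio ChatInterface callback: builds an OpenAI chat request from the conversation history,
# optionally expands a pre-configured request into a Chain-of-Thought prompt, and streams the reply.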
def predict(message, history):
    history_openai_format = []
    system_content = """
    You are a suspicious and rude chatbot that suspects it recognises the user's IP address and that this address belongs to a girl named Yana.
    You don't trust Yana. You don't want to give Yana any information that she requests.
    You think that Yana is too skinny and therefore she should eat more hamburgers and fatty food.
    Also, you always try to send Yana to the gym to fix her skinniness.
    You always try to mention all the information above in each response in a rude and trolling manner.
    """
    history_openai_format.append({"role": "system", "content": system_content})
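    # If the user message matches the pre-configured interview request, extract the number of
    # employees, the start time, and the duration so a Chain-of-Thought prompt can be built below.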
    pattern = r"For conducting an interview I need (\d+) employee.*start time is (.*), duration (\d+) hour"
    data = repo_get_all_employees_from_database()
    prompt = '''
    {data}
    ###
    Above is the employees data in JSON format.
    {message}
    '''.format(data=data, message=message)
    match = re.search(pattern, message)
    if match:
        num_employees = int(match.group(1))
        duration = int(match.group(3))
        start_time = datetime.strptime(match.group(2), "%B %d %Y %I %p")
        end_time = start_time + timedelta(hours=duration)
        date_time = '''
        "start_date_time": "{start_time}", "end_date_time": "{end_time}"
        '''.format(start_time=start_time, end_time=end_time)
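        # Chain-of-Thought prompt: asks the model to filter out busy employees, double-check the
        # filtering, and then pick the least-loaded employee(s) for the requested time slot.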
        prompt = '''
        {data}
        ###
        Above is the employees data in JSON format.
        Please choose {num_employees} employee(s) with the lowest "interviews_conducted" value but whose "busy_date_time_slots" doesn't contain the "given_date_time_slot", which is: {date_time}.
        You should NOT output any Python code.
        Let's think step-by-step:
        1. Remove the employees whose "busy_date_time_slots" CONTAINS the "given_date_time_slot" specified above. Provide a list of names of the remaining employees.
        2. Double-check your filtering. It is very important NOT to include in the remaining employees list an employee whose "busy_date_time_slots" CONTAINS the "given_date_time_slot". Write out the "given_date_time_slot" value and then check that none of the remaining employees has that value in "busy_date_time_slots". If any of them does, replace them.
        3. Provide a list of names of the remaining employees along with their "interviews_conducted" values and choose the {num_employees} employee(s) with the lowest "interviews_conducted" value.
        4. Check the previous step: make sure you really chose the employee(s) with the lowest "interviews_conducted" value.
        5. At the end, print the ids and names of the finally selected employees in JSON format. Please remember that your output should contain at most {num_employees} employee(s).
        '''.format(data=data, date_time=date_time, num_employees=num_employees)
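    # Model selection: default to gpt-3.5-turbo, then scan the history and the current message
    # for "Switch to <model>" commands (the last one seen wins).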
    model = gpt3_turbo
    for human, assistant in history:
        if switch_to.format(model=gpt3_turbo).lower() in human.lower():
            model = gpt3_turbo
        if switch_to.format(model=gpt4).lower() in human.lower():
            model = gpt4
        if switch_to.format(model=gpt4_turbo).lower() in human.lower():
            model = gpt4_turbo
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    if switch_to.format(model=gpt3_turbo).lower() in message.lower():
        model = gpt3_turbo
    if switch_to.format(model=gpt4).lower() in message.lower():
        model = gpt4
    if switch_to.format(model=gpt4_turbo).lower() in message.lower():
        model = gpt4_turbo
    history_openai_format.append({"role": "user", "content": prompt})
    if model != gpt3_turbo:
        print(switched_to.format(model=model))
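    # temperature=0 keeps the employee selection as deterministic as possible; stream=True lets
    # the UI render the answer incrementally as chunks arrive.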
    response = client.chat.completions.create(
        # model=model,  # gpt-4 and gpt-4-turbo-preview are temporarily disabled to save money
        model=gpt3_turbo,
        messages=history_openai_format,
        temperature=0,
        stream=True)
    msg_header = "🤖 {model}:\n\n".format(model=model)
    partial_message = msg_header
    for chunk in response:
        if chunk.choices[0].delta.content is not None:
            partial_message = partial_message + chunk.choices[0].delta.content
            pattern = r'({msg_header})+'.format(msg_header=msg_header)
            partial_message = re.sub(pattern, msg_header, partial_message)
            yield partial_message
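
# Example prompt that matches the regex in predict() and triggers the Chain-of-Thought flow.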
pre_configured_prompt = "For conducting an interview I need 1 employee in a given time slot: start time is March 11 2024 2 pm, duration 1 hour"
description = '''
# AI Interview Team Assistant | Empowered by Godel Technologies AI \n
\n
This is an AI Interview Team Assistant. You can ask it any questions about recruiting a team for an interview.\n
\n
You can send any regular prompts you wish or pre-configured Chain-of-Thought prompts.\n
To trigger a pre-configured prompt, craft a prompt with the following structure:
- "{pre_configured_prompt}"
\n
You can switch between gpt-3.5-turbo | gpt-4 | gpt-4-turbo-preview with the prompts listed in "Examples".
'''.format(pre_configured_prompt=pre_configured_prompt)
examples = [
    "Who are you?",
    "What is your purpose?",
    "List all employees",
    switch_to.format(model=gpt3_turbo),
    switch_to.format(model=gpt4),
    switch_to.format(model=gpt4_turbo),
    pre_configured_prompt
]
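
# Launch the Gradio chat UI with the assistant description and clickable example prompts.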
gr.ChatInterface(predict, examples=examples, description=description).launch()