import os
from openai import OpenAI
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Initialize OpenAI client with API key from .env
openai_client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
def run_gpt4o_mini(question):
    try:
        # Check if the question is a dictionary and extract the prompt
        if isinstance(question, dict) and 'prompt' in question:
            question_text = question['prompt']
        else:
            question_text = str(question)

        response = openai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are a helpful assistant. Answer the question to the best of your ability."},
                {"role": "user", "content": question_text}
            ]
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"Error running GPT-4o-mini: {str(e)}")
        return None
def run_gpt4o(question):
    try:
        # Check if the question is a dictionary and extract the prompt
        if isinstance(question, dict) and 'prompt' in question:
            question_text = question['prompt']
        else:
            question_text = str(question)

        response = openai_client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "system", "content": "You are a helpful assistant. Answer the question to the best of your ability."},
                {"role": "user", "content": question_text}
            ]
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"Error running GPT-4o: {str(e)}")
        return None
def run_custom_model(model_name, question):
    # Placeholder for custom model logic
    # You'll need to implement this based on how your custom models work
    return f"Custom model {model_name} response: This is a placeholder answer for the question provided."
def run_model(model_name, question):
    # Dispatch to the matching runner by model name; anything unrecognized
    # falls through to the custom model handler
    if model_name == "gpt-4o-mini":
        return run_gpt4o_mini(question)
    elif model_name == "gpt-4o":
        return run_gpt4o(question)
    else:
        return run_custom_model(model_name, question)
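# Quick smoke test when executed as a script (assumes a valid OPENAI_API_KEY
# in .env; the question and the custom model name below are illustrative only):
if __name__ == "__main__":
    sample_question = {"prompt": "What is the capital of France?"}
    for name in ["gpt-4o-mini", "gpt-4o", "my-custom-model"]:
        print(f"--- {name} ---")
        print(run_model(name, sample_question))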