import os
import base64  # used to base64-encode image bytes for the vision request
import requests  # HTTP requests to Hugging Face inference endpoints
from openai import OpenAI
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Initialize OpenAI client with API key from .env
openai_client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
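# Expected .env keys (names taken from the os.getenv calls in this module):
#   OPENAI_API_KEY=<your OpenAI API key>
#   MONGODB_URI=<your MongoDB connection string>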
def run_gpt4o_mini(question):
    try:
        # Accept either a raw string or a dict with a 'prompt' key
        if isinstance(question, dict) and 'prompt' in question:
            question_text = question['prompt']
        else:
            question_text = str(question)

        response = openai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are a helpful assistant. Answer the question to the best of your ability."},
                {"role": "user", "content": question_text}
            ]
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"Error running GPT-4o-mini: {str(e)}")
        return None
def run_gpt4o(question):
    try:
        # Accept either a raw string or a dict with a 'prompt' key
        if isinstance(question, dict) and 'prompt' in question:
            question_text = question['prompt']
        else:
            question_text = str(question)

        response = openai_client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "system", "content": "You are a helpful assistant. Answer the question to the best of your ability."},
                {"role": "user", "content": question_text}
            ]
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"Error running GPT-4o: {str(e)}")
        return None
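# Example usage (sketch): both wrappers accept a plain string or a
# {"prompt": ...} dict, so either call below works.
#
#   answer = run_gpt4o_mini("What is retrieval-augmented generation?")
#   answer = run_gpt4o({"prompt": "Summarize the CAP theorem in one sentence."})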
def run_custom_model(model_name, question):
    # Placeholder for custom model logic; implement this based on how
    # your custom models are hosted and invoked.
    return f"Custom model {model_name} response: This is a placeholder answer for the question provided."
def run_huggingface_model(endpoint, token, prompt, context):
    """
    Runs the Hugging Face model with the provided prompt and context.

    Args:
        endpoint (str): The Hugging Face model endpoint URL.
        token (str): The Hugging Face API token.
        prompt (str): The user's prompt.
        context (str): The context related to the prompt.

    Returns:
        str: The generated response from the Hugging Face model.
    """
    headers = {"Authorization": f"Bearer {token}"}
    combined_input = f"{context}\n\n{prompt}" if context else prompt
    payload = {"inputs": combined_input}

    try:
        response = requests.post(endpoint, headers=headers, json=payload, timeout=60)
        response.raise_for_status()
        # Text-generation endpoints return a list of {"generated_text": ...} objects
        generated_text = response.json()[0]['generated_text']
        return generated_text
    except requests.exceptions.RequestException as e:
        print(f"Error calling Hugging Face API: {e}")
        return None
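# Example usage (sketch; the endpoint URL and HF_API_TOKEN variable are
# illustrative, not part of this project's configuration):
#
#   reply = run_huggingface_model(
#       endpoint="https://api-inference.huggingface.co/models/your-org/your-model",
#       token=os.getenv("HF_API_TOKEN"),
#       prompt="What does this service do?",
#       context="The service evaluates LLM outputs against references.",
#   )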
def run_model(model_name, prompt, context=""):
    """
    Runs the specified model with the given prompt and context.

    Args:
        model_name (str): The name of the model to run.
        prompt (str): The user's prompt.
        context (str, optional): The context related to the prompt. Defaults to "".

    Returns:
        str: The generated response from the model.
    """
    from pymongo import MongoClient

    # MongoDB connection (environment variables are loaded at module import)
    mongodb_uri = os.getenv('MONGODB_URI')
    mongo_client = MongoClient(mongodb_uri)
    db = mongo_client['llm_evaluation_system']
    users_collection = db['users']

    if model_name == "gpt-4o-mini":
        return run_gpt4o_mini(prompt)
    elif model_name == "gpt-4o":
        return run_gpt4o(prompt)
    elif model_name.startswith("HF_"):
        # Fetch model details from the database
        user = users_collection.find_one({"models.model_name": model_name})
        if user:
            model = next((m for m in user['models'] if m['model_name'] == model_name), None)
            if model:
                return run_huggingface_model(model['model_link'], model['model_api_token'], prompt, context)
        print(f"Hugging Face model {model_name} not found")
        return None
    else:
        return run_custom_model(model_name, prompt)
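# Example usage (sketch): "HF_my-model" is an illustrative model name. The
# dispatcher assumes user documents shaped like
#   {"models": [{"model_name": "HF_my-model",
#                "model_link": "<inference endpoint URL>",
#                "model_api_token": "<token>"}]}
# (field names taken from the lookups above).
#
#   answer = run_model("gpt-4o-mini", "Explain the BLEU score briefly.")
#   answer = run_model("HF_my-model", "Explain the BLEU score briefly.",
#                      context="Keep it under 50 words.")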
def summarize_image(image_bytes: bytes) -> str:
    """Summarizes an image by sending it to GPT-4o-mini as a base64 data URL."""
    try:
        # Convert raw bytes to base64 so the image can be inlined as a data URL
        base64_image = base64.b64encode(image_bytes).decode('utf-8')

        payload = {
            "model": "gpt-4o-mini",
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": "Please describe and summarize this image."
                        },
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{base64_image}"
                            }
                        }
                    ]
                }
            ],
            "max_tokens": 300
        }

        response = openai_client.chat.completions.create(**payload)
        summary = response.choices[0].message.content.strip()
        return summary
    except Exception as e:
        print(f"Error in summarize_image: {e}")
        return "Failed to summarize the image."