import os
import base64  # used to encode raw image bytes for the vision prompt
import requests  # used for HTTP requests to the Hugging Face Inference API

from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables
load_dotenv()

# Initialize OpenAI client with API key from .env
openai_client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))

def run_gpt4o_mini(question):
    try:
        # Check if the question is a dictionary and extract the prompt
        if isinstance(question, dict) and 'prompt' in question:
            question_text = question['prompt']
        else:
            question_text = str(question)

        response = openai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are a helpful assistant. Answer the question to the best of your ability."},
                {"role": "user", "content": question_text}
            ]
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"Error running GPT-4o-mini: {str(e)}")
        return None

def run_gpt4o(question):
    try:
        # Check if the question is a dictionary and extract the prompt
        if isinstance(question, dict) and 'prompt' in question:
            question_text = question['prompt']
        else:
            question_text = str(question)

        response = openai_client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "system", "content": "You are a helpful assistant. Answer the question to the best of your ability."},
                {"role": "user", "content": question_text}
            ]
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"Error running GPT-4o: {str(e)}")
        return None

def run_custom_model(model_name, question):
    # Placeholder for custom model logic
    # You'll need to implement this based on how your custom models work
    return f"Custom model {model_name} response: This is a placeholder answer for the question provided."

def run_huggingface_model(endpoint, token, prompt, context):
    """
    Runs the Hugging Face model with the provided prompt and context.

    Args:
        endpoint (str): The Hugging Face model endpoint URL.
        token (str): The Hugging Face API token.
        prompt (str): The user's prompt.
        context (str): The context related to the prompt.

    Returns:
        str: The generated response from the Hugging Face model.
    """
    headers = {"Authorization": f"Bearer {token}"}
    combined_input = f"{context}\n\n{prompt}" if context else prompt
    payload = {"inputs": combined_input}

    try:
        response = requests.post(endpoint, headers=headers, json=payload)
        response.raise_for_status()
        result = response.json()
        # The hosted text-generation pipeline typically returns a list like
        # [{"generated_text": "..."}]; guard against other shapes (e.g. error dicts)
        if isinstance(result, list) and result and 'generated_text' in result[0]:
            return result[0]['generated_text']
        print(f"Unexpected response from Hugging Face API: {result}")
        return None
    except requests.exceptions.RequestException as e:
        print(f"Error calling Hugging Face API: {e}")
        return None
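
# Example call (illustrative only; the endpoint URL and HF_API_TOKEN env var
# below are assumptions, not values defined by this project):
#
#   answer = run_huggingface_model(
#       "https://api-inference.huggingface.co/models/gpt2",
#       os.getenv('HF_API_TOKEN'),
#       "What is the capital of France?",
#       context="",
#   )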

def run_model(model_name, prompt, context=""):
    """
    Runs the specified model with the given prompt and context.

    Args:
        model_name (str): The name of the model to run.
        prompt (str): The user's prompt.
        context (str, optional): The context related to the prompt. Defaults to "".

    Returns:
        str: The generated response from the model.
    """

    if model_name == "gpt-4o-mini":
        return run_gpt4o_mini(prompt)
    elif model_name == "gpt-4o":
        return run_gpt4o(prompt)
    elif model_name.startswith("HF_"):
        # Connect to MongoDB only when a registered Hugging Face model
        # actually needs to be looked up
        from pymongo import MongoClient
        mongo_client = MongoClient(os.getenv('MONGODB_URI'))
        users_collection = mongo_client['llm_evaluation_system']['users']
        # Fetch model details from the database
        user = users_collection.find_one({"models.model_name": model_name})
        if user:
            model = next((m for m in user['models'] if m['model_name'] == model_name), None)
            if model:
                return run_huggingface_model(model['model_link'], model['model_api_token'], prompt, context)
        print(f"Hugging Face model {model_name} not found")
        return None
    else:
        return run_custom_model(model_name, prompt)
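
# Example dispatch through run_model (illustrative; the model names below are
# placeholders, and "HF_" is the prefix this module uses for registered
# Hugging Face models):
#
#   print(run_model("gpt-4o-mini", "Explain overfitting in one sentence."))
#   print(run_model("HF_my_model", "Explain overfitting in one sentence."))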

def summarize_image(image_bytes: bytes) -> str:
    """
    Summarizes the given image using a vision-capable chat model.

    Args:
        image_bytes (bytes): The raw image data (JPEG assumed by the data URL below).

    Returns:
        str: A short description of the image, or an error message on failure.
    """
    try:
        # Encode the raw bytes as base64 so they can be embedded in a data URL
        base64_image = base64.b64encode(image_bytes).decode('utf-8')
        
        payload = {
            "model": "gpt-4o-mini",
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": "Please describe and summarize this image."
                        },
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{base64_image}"
                            }
                        }
                    ]
                }
            ],
            "max_tokens": 300
        }

        response = openai_client.chat.completions.create(**payload)
        
        summary = response.choices[0].message.content.strip()
        return summary

    except Exception as e:
        print(f"Error in summarize_image: {e}")
        return "Failed to summarize the image."