# ponix-generator / llm_wrapper.py
import json
import os

import openai
# Base directory for prompt JSON files; the empty string resolves
# prompt_in_path relative to the working directory.
prompt_base_path = ""
# Gemini accessed through Google's OpenAI-compatible endpoint.
client = openai.OpenAI(
    api_key=os.getenv("GEMINI_API_KEY"),
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
)
def run_gemini(
    target_prompt: str,
    prompt_in_path: str,
    llm_model: str = "gemini-2.0-flash-exp",
) -> str:
"""
gemini 모델 사용 코드
"""
    # Load the prompt template.
    with open(
        os.path.join(prompt_base_path, prompt_in_path), "r", encoding="utf-8"
    ) as file:
        prompt_dict = json.load(file)
    system_prompt = prompt_dict["system_prompt"]
    user_prompt_head, user_prompt_tail = (
        prompt_dict["user_prompt"]["head"],
        prompt_dict["user_prompt"]["tail"],
    )

    # Sandwich the caller's prompt between the template's head and tail.
    user_prompt_text = "\n".join([user_prompt_head, target_prompt, user_prompt_tail])
    input_content = [{"type": "text", "text": user_prompt_text}]
    # A plain-text completion is requested and no response_format is passed,
    # so the standard `create` call is used instead of the structured-output
    # `beta.chat.completions.parse` helper.
    chat_completion = client.chat.completions.create(
        model=llm_model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": input_content},
        ],
    )
    chat_output = chat_completion.choices[0].message.content
    return chat_output
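

# Minimal usage sketch (an assumption, not part of the original module):
# requires GEMINI_API_KEY to be set, and a prompt file with the structure
# shown above at the hypothetical path "prompts/example.json". The target
# prompt string is a placeholder.
if __name__ == "__main__":
    print(
        run_gemini(
            target_prompt="A short placeholder prompt.",
            prompt_in_path="prompts/example.json",
        )
    )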