import json
from enum import Enum

from openai import OpenAI
from anthropic import Anthropic
import google.generativeai as genai  # used only by the currently disabled Gemini provider

from prompts import LEGAL_POSITION_PROMPT, SYSTEM_PROMPT
from config import anthropic_api_key


class GenerationProvider(str, Enum):
    OPENAI = "openai"
    # GEMINI = "gemini"
    ANTHROPIC = "anthropic"


class GenerationModelName(str, Enum):
    # OpenAI models
    GPT_MODEL_8 = "ft:gpt-4o-mini-2024-07-18:personal:legal-position-1500:Aaiu4WZd"
    GPT_MODEL_9 = "ft:gpt-4o-mini-2024-07-18:personal:legal-position-1700:AbNt5I2x"

    # Gemini models
    # GEMINI_FLASH = "gemini-1.5-flash"

    # Anthropic models
    # CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
    ANTHROPIC_MODEL_4 = "claude-3-5-sonnet-latest"
    # CLAUDE_3_OPUS = "claude-3-opus-20240229"


def generate_legal_position(court_decision_text: str, comment_input: str,
                            provider: str, model_name: str) -> dict:
    """Generate a legal position (title, text, proceeding, category) from a court decision."""
    if not isinstance(court_decision_text, str) or not court_decision_text.strip():
        return {
            "title": "Invalid input",
            "text": "Court decision text is required and must be non-empty.",
            "proceeding": "Error",
            "category": "Error"
        }

    try:
        content = LEGAL_POSITION_PROMPT.format(
            court_decision_text=court_decision_text,
            comment=comment_input if comment_input else "Коментар відсутній"  # "No comment provided"
        )

        if provider == GenerationProvider.OPENAI.value:
            client = OpenAI()
            response = client.chat.completions.create(
                model=model_name,
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": content}
                ],
                response_format={"type": "json_object"},
                temperature=0
            )
            parsed_response = json.loads(response.choices[0].message.content)

        # elif provider == GenerationProvider.GEMINI.value:
        #     generation_config = {
        #         "temperature": 0,
        #         "max_output_tokens": 8192,
        #         "response_mime_type": "application/json",
        #     }
        #
        #     model = genai.GenerativeModel(
        #         model_name=model_name,
        #         generation_config=generation_config,
        #     )
        #
        #     chat = model.start_chat(history=[])
        #     response = chat.send_message(
        #         f"{SYSTEM_PROMPT}\n\n{content}",
        #     )
        #     parsed_response = json.loads(response.text)

        elif provider == GenerationProvider.ANTHROPIC.value:
            client = Anthropic(api_key=anthropic_api_key)

            # Anthropic has no JSON response-format switch, so the expected schema is
            # spelled out in the prompt (in Ukrainian: "Please reply as JSON with these fields").
            json_instruction = """
            Будь ласка, надай відповідь у форматі JSON з наступними полями:
            {
                "title": "заголовок правової позиції",
                "text": "текст правової позиції",
                "proceeding": "тип судочинства",
                "category": "категорія справи"
            }
            """

            response = client.messages.create(
                model=model_name,
                max_tokens=4096,
                temperature=0,
                # The Messages API takes the system prompt via the `system` parameter;
                # a conversation must start with a "user" message, not an "assistant" one.
                system="Ти - кваліфікований юрист-аналітик.",
                messages=[
                    {
                        "role": "user",
                        "content": f"{SYSTEM_PROMPT}\n{json_instruction}\n{content}"
                    }
                ]
            )
            parsed_response = json.loads(response.content[0].text)

        else:
            return {
                "title": "Error",
                "text": f"Unsupported provider: {provider}",
                "proceeding": "Error",
                "category": "Error"
            }

        # Check and convert fields for all providers:
        # accept "text_lp" as an alias for the required "text" field.
        if 'text_lp' in parsed_response and 'text' not in parsed_response:
            parsed_response['text'] = parsed_response.pop('text_lp')

        # Validate the result
        required_fields = ["title", "text", "proceeding", "category"]
        if all(field in parsed_response for field in required_fields):
            return parsed_response

        missing_fields = [field for field in required_fields if field not in parsed_response]
        return {
            "title": parsed_response.get('title', 'Error'),
            "text": f"Missing required fields: {', '.join(missing_fields)}",
            "proceeding": parsed_response.get('proceeding', 'Error'),
            "category": parsed_response.get('category', 'Error')
        }

    except json.JSONDecodeError as e:
        return {
            "title": "Error parsing response",
            "text": f"Failed to parse JSON response: {str(e)}",
            "proceeding": "Error",
            "category": "Error"
        }
    except Exception as e:
        return {
            "title": "Error",
            "text": f"Unexpected error: {str(e)}",
            "proceeding": "Error",
            "category": "Error"
        }
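

# Minimal usage sketch (illustrative only). The placeholder decision text below is an
# assumption; in the real application the text comes from the caller, and the OpenAI /
# Anthropic credentials must already be configured via the environment or `config`.
if __name__ == "__main__":
    example = generate_legal_position(
        court_decision_text="<текст судового рішення>",  # placeholder, not real input
        comment_input="",                                # falls back to "Коментар відсутній"
        provider=GenerationProvider.ANTHROPIC.value,
        model_name=GenerationModelName.ANTHROPIC_MODEL_4.value,
    )
    print(json.dumps(example, ensure_ascii=False, indent=2))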