# ai_agents/utils/audit/response_llm.py
# Author: Ilyas KHIAT
from openai import OpenAI
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
def generate_response_openai(prompt: str, stream: bool = False, model: str = "gpt-4o-mini"):
    """Send *prompt* to the OpenAI chat-completions API and return the reply.

    Args:
        prompt: The user message to send as a single-turn conversation.
        stream: When ``False`` (default), block and return the full reply as a
            ``str``. When ``True``, return a generator yielding text deltas as
            they arrive (matches ``generate_response_via_langchain``'s contract).
        model: Chat model identifier; defaults to ``"gpt-4o-mini"``.

    Returns:
        ``str`` when ``stream`` is ``False``; otherwise a generator of ``str``
        chunks.

    Note:
        Relies on the ``OPENAI_API_KEY`` environment variable (read implicitly
        by ``OpenAI()``).
    """
    client = OpenAI()
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "user", "content": prompt}
        ],
        stream=stream
    )
    if stream:
        # BUG FIX: with stream=True the API returns an iterator of chunks;
        # chunks expose `.delta`, not `.message`, so the old unconditional
        # `response.choices[0].message.content` raised AttributeError.
        def _yield_deltas():
            for chunk in response:
                # Some chunks (e.g. the final one) carry no content delta.
                delta = chunk.choices[0].delta.content
                if delta:
                    yield delta
        return _yield_deltas()
    return response.choices[0].message.content
def generate_response_via_langchain(query: str, stream: bool = False, model: str = "gpt-4o-mini"):
    """Run *query* through a minimal LangChain pipeline and return the answer.

    Args:
        query: The user question, used verbatim as the whole prompt.
        stream: When ``True``, return a generator of streamed text chunks;
            otherwise block and return the complete answer as a ``str``.
        model: Chat model identifier; defaults to ``"gpt-4o-mini"``.

    Returns:
        ``str`` when ``stream`` is ``False``; otherwise a stream of ``str``
        chunks produced by the chain.
    """
    # Pass-through template: the query itself is the entire prompt.
    passthrough_prompt = PromptTemplate.from_template("{query}")
    chat_model = ChatOpenAI(model=model)

    # prompt -> chat model -> plain-string output
    chain = passthrough_prompt | chat_model | StrOutputParser()

    payload = {"query": query}
    if stream:
        # Generator of incremental text chunks.
        return chain.stream(payload)
    return chain.invoke(payload)