# Hugging Face Spaces status header (page-scrape residue): Spaces: Running
from typing import Iterator, Union

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from openai import OpenAI
def generate_response_openai(
    prompt: str, stream: bool = False, model: str = "gpt-4o-mini"
) -> Union[str, Iterator[str]]:
    """Send *prompt* to the OpenAI chat-completions API and return the reply.

    Args:
        prompt: The user message sent as a single-turn chat.
        stream: When True, return a generator yielding response text chunks
            as they arrive instead of one complete string.
        model: OpenAI chat model name.

    Returns:
        The full response text, or an iterator of text chunks when
        ``stream=True`` (matching ``generate_response_via_langchain``).
    """
    client = OpenAI()
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        stream=stream,
    )

    if stream:
        # BUG FIX: with stream=True the API returns a Stream of chunk
        # objects, which has no `.choices[0].message` — the original line
        # raised here. Yield each incremental delta instead.
        def _iter_chunks() -> Iterator[str]:
            for chunk in response:
                delta = chunk.choices[0].delta.content
                if delta:  # final/keep-alive chunks carry content=None
                    yield delta

        return _iter_chunks()

    return response.choices[0].message.content
def generate_response_via_langchain(query: str, stream: bool = False, model: str = "gpt-4o-mini"):
    """Run *query* through a minimal LangChain pipeline and return the answer.

    Args:
        query: Text passed verbatim to the model (the template is a pure
            pass-through).
        stream: When True, return a generator of streamed response chunks.
        model: OpenAI chat model name.

    Returns:
        The model's reply as a string, or a chunk generator when
        ``stream=True``.
    """
    # prompt -> chat model -> plain-string parser, composed with LCEL.
    chain = (
        PromptTemplate.from_template("{query}")
        | ChatOpenAI(model=model)
        | StrOutputParser()
    )

    payload = {"query": query}
    return chain.stream(payload) if stream else chain.invoke(payload)