Spaces:
Sleeping
Sleeping
Ilyas KHIAT
committed on
Commit
·
fb4fd4c
1
Parent(s):
9c9b49f
delete docs
Browse files
main.py
CHANGED
@@ -141,13 +141,21 @@ import asyncio
|
|
141 |
|
142 |
GENERATION_TIMEOUT_SEC = 60
|
143 |
|
144 |
-
async def stream_generator(response,prompt):
|
|
|
145 |
async with async_timeout.timeout(GENERATION_TIMEOUT_SEC):
|
146 |
try:
|
147 |
async for chunk in response:
|
148 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
149 |
except asyncio.TimeoutError:
|
150 |
raise HTTPException(status_code=504, detail="Stream timed out")
|
|
|
151 |
|
152 |
|
153 |
@app.post("/generate-answer/")
|
|
|
141 |
|
142 |
GENERATION_TIMEOUT_SEC = 60
|
143 |
|
144 |
+
async def stream_generator(response, prompt):
    """Re-emit an async chunk stream as complete JSON payloads.

    Pieces arriving from ``response`` are concatenated until they form a
    parseable JSON document; each complete document is yielded as a JSON
    string pairing the originating ``prompt`` with the parsed content.
    The entire stream is bounded by ``GENERATION_TIMEOUT_SEC``; on expiry
    a 504 ``HTTPException`` is raised.
    """
    pending = ""  # accumulates partial chunks until they parse as JSON
    async with async_timeout.timeout(GENERATION_TIMEOUT_SEC):
        try:
            async for piece in response:
                pending += piece
                try:
                    parsed = json.loads(pending)
                except json.JSONDecodeError:
                    # Not a complete JSON document yet — keep accumulating.
                    continue
                yield json.dumps({"prompt": prompt, "content": parsed})
                pending = ""  # reset for the next document
        except asyncio.TimeoutError:
            raise HTTPException(status_code=504, detail="Stream timed out")
|
158 |
+
|
159 |
|
160 |
|
161 |
@app.post("/generate-answer/")
|
rag.py
CHANGED
@@ -107,7 +107,7 @@ def generate_response_via_langchain(query: str, stream: bool = False, model: str
|
|
107 |
prompt = PromptTemplate.from_template(template)
|
108 |
|
109 |
# Initialize the OpenAI LLM with the specified model
|
110 |
-
llm = ChatOpenAI(model=model)
|
111 |
|
112 |
# Create an LLM chain with the prompt and the LLM
|
113 |
llm_chain = prompt | llm | StrOutputParser()
|
|
|
107 |
prompt = PromptTemplate.from_template(template)
|
108 |
|
109 |
# Initialize the OpenAI LLM with the specified model
|
110 |
+
llm = ChatOpenAI(model=model,temperature=0)
|
111 |
|
112 |
# Create an LLM chain with the prompt and the LLM
|
113 |
llm_chain = prompt | llm | StrOutputParser()
|