subhrajit mohanty
committed on
Update response-generation code for the new OpenAI SDK version
Browse files
app.py
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
import
|
2 |
from fastapi import FastAPI, HTTPException
|
3 |
from fastapi.responses import StreamingResponse
|
4 |
from fastapi.middleware.cors import CORSMiddleware
|
@@ -9,6 +9,7 @@ from tinydb import TinyDB
|
|
9 |
from tinydb import Query
|
10 |
from datetime import datetime
|
11 |
from utils import generate_token
|
|
|
12 |
|
13 |
query = Query()
|
14 |
db = TinyDB(".token.json")
|
@@ -53,18 +54,18 @@ class RefToken(BaseModel):
|
|
53 |
ref_key: str
|
54 |
|
55 |
def get_openai_generator(prompt: str):
|
56 |
-
|
|
|
|
|
57 |
model="gpt-3.5-turbo",
|
58 |
messages=[{"role": "user", "content": prompt}],
|
59 |
-
temperature=0.
|
60 |
-
stream=True
|
61 |
)
|
62 |
-
for
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
if "content" not in event["choices"][0].delta:
|
67 |
-
yield "[DONE]"
|
68 |
|
69 |
@app.get("/")
|
70 |
async def base_url():
|
@@ -96,7 +97,7 @@ async def list(token: str = Depends(verify_token)):
|
|
96 |
|
97 |
@app.post("/chat")
|
98 |
async def chat(chat_input: ChatInput, token: str = Depends(verify_token)):
|
99 |
-
|
100 |
prompt = f"User: {chat_input.message}\nAI:"
|
101 |
try:
|
102 |
return StreamingResponse(get_openai_generator(prompt), media_type='text/event-stream')
|
|
|
1 |
+
from openai import OpenAI
|
2 |
from fastapi import FastAPI, HTTPException
|
3 |
from fastapi.responses import StreamingResponse
|
4 |
from fastapi.middleware.cors import CORSMiddleware
|
|
|
9 |
from tinydb import Query
|
10 |
from datetime import datetime
|
11 |
from utils import generate_token
|
12 |
+
import os
|
13 |
|
14 |
query = Query()
|
15 |
db = TinyDB(".token.json")
|
|
|
54 |
ref_key: str
|
55 |
|
56 |
def get_openai_generator(prompt: str):
    """Stream a chat completion for *prompt* as Server-Sent Events.

    Yields one SSE-framed string ("data: <json chunk>\n\n") per streamed
    chunk from the OpenAI chat API, followed by a terminal "data: [DONE]"
    frame so SSE clients know the stream has ended.

    NOTE(review): assumes OPENAI_API_KEY is already set in the environment
    (the /chat endpoint assigns it before calling this) — confirm callers.
    """
    # Defensive local import: this commit's diff adds `os` and `OpenAI` at
    # the top of the file but never imports `json`, which json.dumps below
    # requires. A function-local import is safe even if a module-level
    # `import json` exists in an unseen part of the file.
    import json

    # OpenAI() reads the API key from the environment at construction time.
    client = OpenAI()

    openai_stream = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.7,
        stream=True,
    )
    for chunk in openai_stream:
        # Serialize each streamed chunk into an SSE "data:" frame.
        chunk_data = chunk.to_dict()
        yield f"data: {json.dumps(chunk_data)}\n\n"
    # Terminal sentinel frame for SSE consumers.
    yield "data: [DONE]\n\n"
|
|
|
|
|
69 |
|
70 |
@app.get("/")
|
71 |
async def base_url():
|
|
|
97 |
|
98 |
@app.post("/chat")
|
99 |
async def chat(chat_input: ChatInput, token: str = Depends(verify_token)):
|
100 |
+
os.environ["OPENAI_API_KEY"] = chat_input.openAI_token
|
101 |
prompt = f"User: {chat_input.message}\nAI:"
|
102 |
try:
|
103 |
return StreamingResponse(get_openai_generator(prompt), media_type='text/event-stream')
|