subhrajit mohanty commited on
Commit
dc0d5b8
·
verified ·
1 Parent(s): af6a6f0

Update code to the current OpenAI SDK (v1 client API) for response generation

Browse files
Files changed (1) hide show
  1. app.py +12 -11
app.py CHANGED
@@ -1,4 +1,4 @@
1
- import openai
2
  from fastapi import FastAPI, HTTPException
3
  from fastapi.responses import StreamingResponse
4
  from fastapi.middleware.cors import CORSMiddleware
@@ -9,6 +9,7 @@ from tinydb import TinyDB
9
  from tinydb import Query
10
  from datetime import datetime
11
  from utils import generate_token
 
12
 
13
  query = Query()
14
  db = TinyDB(".token.json")
@@ -53,18 +54,18 @@ class RefToken(BaseModel):
53
  ref_key: str
54
 
55
  def get_openai_generator(prompt: str):
56
- openai_stream = openai.ChatCompletion.create(
 
 
57
  model="gpt-3.5-turbo",
58
  messages=[{"role": "user", "content": prompt}],
59
- temperature=0.0,
60
- stream=True,
61
  )
62
- for event in openai_stream:
63
- if "content" in event["choices"][0].delta:
64
- current_response = event["choices"][0].delta.content
65
- yield current_response
66
- if "content" not in event["choices"][0].delta:
67
- yield "[DONE]"
68
 
69
  @app.get("/")
70
  async def base_url():
@@ -96,7 +97,7 @@ async def list(token: str = Depends(verify_token)):
96
 
97
  @app.post("/chat")
98
  async def chat(chat_input: ChatInput, token: str = Depends(verify_token)):
99
- openai.api_key = chat_input.openAI_token
100
  prompt = f"User: {chat_input.message}\nAI:"
101
  try:
102
  return StreamingResponse(get_openai_generator(prompt), media_type='text/event-stream')
 
1
+ from openai import OpenAI
2
  from fastapi import FastAPI, HTTPException
3
  from fastapi.responses import StreamingResponse
4
  from fastapi.middleware.cors import CORSMiddleware
 
9
  from tinydb import Query
10
  from datetime import datetime
11
  from utils import generate_token
12
+ import os
13
 
14
  query = Query()
15
  db = TinyDB(".token.json")
 
54
  ref_key: str
55
 
56
def get_openai_generator(prompt: str):
    """Stream an OpenAI chat completion as server-sent events (SSE).

    Yields one ``data: {json}\n\n`` frame per streamed completion chunk,
    followed by a final ``data: [DONE]\n\n`` sentinel, so a browser
    ``EventSource`` / SSE client can consume the stream.

    Args:
        prompt: The fully formatted prompt sent as a single user message.

    Yields:
        str: SSE-framed JSON chunks, then the ``[DONE]`` terminator.
    """
    # BUG FIX: the diff adds json.dumps() below but never adds a top-level
    # `import json`, which would raise NameError on the first chunk.
    # Imported locally so this block is self-contained.
    import json

    # Client picks up OPENAI_API_KEY from the environment (set by /chat).
    client = OpenAI()

    openai_stream = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.7,
        stream=True,
    )
    for chunk in openai_stream:
        # Forward the whole chunk dict; the client extracts deltas itself.
        yield f"data: {json.dumps(chunk.to_dict())}\n\n"
    yield "data: [DONE]\n\n"
 
 
69
 
70
  @app.get("/")
71
  async def base_url():
 
97
 
98
  @app.post("/chat")
99
  async def chat(chat_input: ChatInput, token: str = Depends(verify_token)):
100
+ os.environ["OPENAI_API_KEY"] = chat_input.openAI_token
101
  prompt = f"User: {chat_input.message}\nAI:"
102
  try:
103
  return StreamingResponse(get_openai_generator(prompt), media_type='text/event-stream')