Spaces:
Running
Running
Update bin_public/app/chat_func.py
Browse files- bin_public/app/chat_func.py +17 -1
bin_public/app/chat_func.py
CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
|
|
3 |
|
4 |
import urllib3
|
5 |
|
|
|
6 |
from tqdm import tqdm
|
7 |
from duckduckgo_search import ddg
|
8 |
from llama_func import *
|
@@ -460,4 +461,19 @@ def reduce_token_size(
|
|
460 |
yield chatbot, history, construct_token_message(
|
461 |
sum(token_count), stream=stream
|
462 |
), token_count
|
463 |
-
logging.info("减少token数量完毕")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
|
4 |
import urllib3
|
5 |
|
6 |
+
import openai
|
7 |
from tqdm import tqdm
|
8 |
from duckduckgo_search import ddg
|
9 |
from llama_func import *
|
|
|
461 |
yield chatbot, history, construct_token_message(
|
462 |
sum(token_count), stream=stream
|
463 |
), token_count
|
464 |
+
logging.info("减少token数量完毕")
|
465 |
+
|
466 |
+
|
467 |
+
def predict_davinci(api_key, input, temperature, history=None):
    """Query the legacy `text-davinci-003` completion model.

    Flattens the conversation *history* (a list of (user, assistant)
    tuple pairs) into a flat prompt list, appends the new *input*, and
    returns the model's completion text.

    Args:
        api_key: OpenAI API key; assigned to the module-global
            ``openai.api_key`` as a side effect.
        input: The new user message to complete. (Name shadows the
            builtin but is kept for interface compatibility.)
        temperature: Sampling temperature forwarded to the API.
        history: Optional list of (user, assistant) tuples; defaults
            to an empty conversation.

    Returns:
        The text of the first completion choice.
    """
    if history is None:
        history = []
    # Flatten [(u1, a1), (u2, a2), ...] -> [u1, a1, u2, a2, ...]
    prompt_parts = list(sum(history, ()))
    prompt_parts.append(input)
    # NOTE: mutates global client state rather than passing the key per-call.
    openai.api_key = api_key
    completion = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt_parts,
        temperature=temperature,
        max_tokens=2048,
    )
    return completion.choices[0].text
|