wanglettes committed on
Commit
a76ac1c
·
1 Parent(s): 9f79fee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +85 -58
app.py CHANGED
@@ -1,65 +1,92 @@
1
- import requests
2
- import pickle
3
- # 用于存储API返回的上下文
4
class gpt:
    """Minimal console ChatGPT client: sends message lists to the OpenAI
    chat-completions endpoint and runs an interactive conversation loop."""

    def send_request(self, messages):
        """POST `messages` to the chat-completions API.

        Args:
            messages: list of {"role": ..., "content": ...} dicts.

        Returns:
            The assistant message dict on success, or a fallback
            assistant-style dict on HTTP failure so callers can always
            index the result with ["content"].
        """
        url = "https://api.openai.com/v1/chat/completions"
        # SECURITY: this key is committed in source (now public) and must be
        # rotated; prefer reading it from an environment variable.
        api_key = "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"
        parameters = {
            "model": "gpt-3.5-turbo-0301",
            "messages": messages,
        }
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        }
        # NOTE(review): the original built a 127.0.0.1:7890 `proxies` dict but
        # never passed it to requests.post; wiring it in would change runtime
        # behavior, so the dead dict is removed instead.
        response = requests.post(url, headers=headers, json=parameters)

        if response.status_code == 200:
            return response.json()["choices"][0]["message"]

        print(response)
        # BUGFIX: the original returned a bare string here, which made the
        # caller's response["content"] lookup raise TypeError. Return a dict
        # with the same message text so the error path is survivable.
        return {"role": "assistant", "content": "Sorry, something went wrong."}

    def start_conversation(self, messages):
        """Interactive REPL: read user input, send it to the API, print the
        reply, and persist the transcript to disk after every turn."""
        print("Welcome to ChatGPT! How can I help you today?")

        while True:
            user_input = input("请输入一些内容:中文回来问题")
            messages.append({"role": "user", "content": user_input})
            response = self.send_request(messages)
            print("ChatBot:", response["content"])

            # Keep the assistant reply in `messages` for multi-turn context,
            # then snapshot the whole transcript to disk.
            messages.append(response)
            with open('my_dict.pickle', 'wb') as f:
                pickle.dump(messages, f)
            # End the session when the USER says goodbye.
            if "goodbye" in user_input:
                print("Goodbye!")
                break
61
 
 
 
 
 
 
 
 
62
 
63
if __name__ == '__main__':
    # Seed the chat with the image-markdown system prompt, then start the REPL.
    system_prompt = {
        "role": "system",
        "content": "/c 从现在起,当你想发送一张照片时,请使用Markdown ,并且不要有反斜线,不要用代码块。使用 Unsplash API (https://source.unsplash.com/1280x720/?< PUT YOUR QUERY HERE >)",
    }
    gpt().start_conversation([system_prompt])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import List, Tuple, Dict, Generator
import json
import os

from langchain.llms import OpenAI
import gradio as gr

# SECURITY: a real API key was committed here; it is now public and must be
# revoked. Supply the key via the environment instead of hard-coding it.
# setdefault keeps an externally-provided key from being clobbered; behavior
# is unchanged when the variable is unset.
os.environ.setdefault("OPENAI_API_KEY", "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")

model_name = "gpt-3.5-turbo"
# Low temperature for mostly-deterministic answers.
LLM = OpenAI(model_name=model_name, temperature=0.1)

# File where the most recent Q/A pair is persisted (see save_qa_data / chat).
qa_data_file_path = 'qa.txt'
10
def save_qa_data(qa_data, file_path):
    """Persist `qa_data` to `file_path` as pretty-printed UTF-8 JSON
    (non-ASCII characters written verbatim, 4-space indent)."""
    serialized = json.dumps(qa_data, ensure_ascii=False, indent=4)
    with open(file_path, 'w', encoding='utf-8') as out:
        out.write(serialized)
13
def create_history_messages(history: List[Tuple[str, str]]) -> List[dict]:
    """Convert (user_text, assistant_text) tuples into chat-API message dicts.

    BUGFIX: the original emitted ALL user messages followed by ALL assistant
    messages, destroying the conversational turn order. The chat endpoint
    expects alternating user/assistant messages, so each tuple now expands
    to a user message immediately followed by its assistant reply.

    Args:
        history: list of (user_text, assistant_text) turn tuples.

    Returns:
        Flat list of {"role": ..., "content": ...} dicts in turn order.
    """
    history_messages: List[dict] = []
    for user_text, assistant_text in history:
        history_messages.append({"role": "user", "content": user_text})
        history_messages.append({"role": "assistant", "content": assistant_text})
    return history_messages
 
 
 
 
 
 
 
 
 
 
17
 
18
def create_formatted_history(history_messages: List[dict]) -> List[Tuple[str, str]]:
    """Collapse role-tagged messages into (user, assistant) display tuples.

    Consecutive messages of the same role are concatenated; a tuple is
    emitted as soon as text from BOTH roles has been collected. Messages
    with any other role (e.g. "system") are ignored. A trailing unmatched
    side is emitted with None as its partner.
    """
    pairs: List[Tuple[str, str]] = []
    pending_user: List[str] = []
    pending_assistant: List[str] = []

    for msg in history_messages:
        role = msg["role"]
        if role == "user":
            pending_user.append(msg["content"])
        elif role == "assistant":
            pending_assistant.append(msg["content"])

        # Flush a completed pair the moment both sides have content.
        if pending_user and pending_assistant:
            pairs.append(("".join(pending_user), "".join(pending_assistant)))
            pending_user = []
            pending_assistant = []

    # Emit whichever side is left dangling at the end, if any.
    if pending_user:
        pairs.append(("".join(pending_user), None))
    elif pending_assistant:
        pairs.append((None, "".join(pending_assistant)))

    return pairs
 
 
 
 
 
 
 
43
 
44
def chat(
    message: str, state: List[Dict[str, str]], client = LLM.client
) -> Generator[Tuple[List[Tuple[str, str]], List[Dict[str, str]]], None, None]:
    """Stream one conversational turn through the chat-completions client.

    Yields (formatted_history, history_messages) after each streamed token
    so the Gradio UI updates incrementally; `history_messages` is also the
    session state passed back in on the next turn.

    Args:
        message: the user's new message.
        state: raw message-dict history from the previous turn, or None.
        client: chat-completions client (default bound at import time).
    """
    history_messages = state
    # BUGFIX: compare to None with `is`, not `==` (PEP 8; `==` can be
    # hijacked by __eq__ on custom state objects).
    if history_messages is None:
        history_messages = []
        history_messages.append({"role": "system", "content": "A helpful assistant."})

    history_messages.append({"role": "user", "content": message})
    # We have no content for the assistant's response yet but we will update this:
    history_messages.append({"role": "assistant", "content": ""})

    response_message = ""

    chat_generator = client.create(
        messages=history_messages, stream=True, model=model_name
    )

    # `in` membership checks are kept (rather than dict .get) because the
    # streamed chunks may be API objects, not plain dicts — confirm.
    for chunk in chat_generator:
        if "choices" in chunk:
            for choice in chunk["choices"]:
                if "delta" in choice and "content" in choice["delta"]:
                    new_token = choice["delta"]["content"]
                    # Add the latest token and mirror the partial answer
                    # into the placeholder assistant entry:
                    response_message += new_token
                    history_messages[-1]["content"] = response_message

                if "finish_reason" in choice and choice["finish_reason"] == "stop":
                    break
                formatted_history = create_formatted_history(history_messages)
                yield formatted_history, history_messages

    formatted_history = create_formatted_history(history_messages)
    # NOTE(review): only the LAST (user, assistant) pair is persisted, and
    # save_qa_data overwrites qa.txt on every turn — confirm this is intended.
    qa_data = formatted_history[-1]
    save_qa_data(qa_data, qa_data_file_path)
81
# --- Gradio UI wiring (module-level side effects: builds and launches the app) ---
# NOTE(review): Chatbot.style() was deprecated and later removed in newer
# Gradio releases — confirm the pinned gradio version still supports it.
chatbot = gr.Chatbot(label="Chat").style(color_map=("yellow", "purple"))
iface = gr.Interface(
    fn=chat,
    inputs=[
        gr.Textbox(placeholder="Hello! How are you? etc.", label="Message"),
        "state",  # hidden per-session state carrying the raw message-dict history
    ],
    outputs=[chatbot, "state"],
    allow_flagging="never",
)

# queue() enables generator/streaming outputs from `chat`; launch() blocks.
iface.queue().launch()