Tafar committed on
Commit 45cdeaf · 1 Parent(s): b554bef

Upload 2 files

service_src_chatgpt_index.ts ADDED
@@ -0,0 +1,234 @@
import * as dotenv from 'dotenv'
import 'isomorphic-fetch'
import type { ChatGPTAPIOptions, ChatMessage, SendMessageOptions } from 'chatgpt'
import { ChatGPTAPI, ChatGPTUnofficialProxyAPI } from 'chatgpt'
import { SocksProxyAgent } from 'socks-proxy-agent'
import httpsProxyAgent from 'https-proxy-agent'
import fetch from 'node-fetch'
import { sendResponse } from '../utils'
import { isNotEmptyString } from '../utils/is'
import type { ApiModel, ChatContext, ChatGPTUnofficialProxyAPIOptions, ModelConfig } from '../types'
import type { RequestOptions, SetProxyOptions, UsageResponse } from './types'

const { HttpsProxyAgent } = httpsProxyAgent

dotenv.config()

const ErrorCodeMessage: Record<string, string> = {
  401: '[OpenAI] Đã cung cấp khóa API không chính xác | Incorrect API key provided',
  403: '[OpenAI] Máy chủ từ chối truy cập, vui lòng thử lại sau | Server refused to access, please try again later',
  502: '[OpenAI] Cổng xấu | Bad Gateway',
  503: '[OpenAI] Máy chủ đang bận, vui lòng thử lại sau | Server is busy, please try again later',
  504: '[OpenAI] Hết thời gian chờ | Gateway Time-out',
  500: '[OpenAI] Lỗi máy chủ nội bộ | Internal Server Error',
  429: '[OpenAI] Máy chủ quá tải | Server overloaded',
}
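// The numeric keys above are HTTP status codes; chatReplyProcess looks up
// error.statusCode in this map to return a bilingual (Vietnamese | English) message.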

const timeoutMs: number = !isNaN(+process.env.TIMEOUT_MS) ? +process.env.TIMEOUT_MS : 100 * 1000
const disableDebug: boolean = process.env.OPENAI_API_DISABLE_DEBUG === 'true'

let apiModel: ApiModel
const model = isNotEmptyString(process.env.OPENAI_API_MODEL) ? process.env.OPENAI_API_MODEL : 'gpt-3.5-turbo'

if (!isNotEmptyString(process.env.OPENAI_API_KEY) && !isNotEmptyString(process.env.OPENAI_ACCESS_TOKEN))
  throw new Error('Missing OPENAI_API_KEY or OPENAI_ACCESS_TOKEN environment variable')

let api: ChatGPTAPI | ChatGPTUnofficialProxyAPI

(async () => {
  // More Info: https://github.com/transitive-bullshit/chatgpt-api

  if (isNotEmptyString(process.env.OPENAI_API_KEY)) {
    const OPENAI_API_BASE_URL = process.env.OPENAI_API_BASE_URL

    let randomApiKey = process.env.OPENAI_API_KEY

    // If a pool of keys is configured, pick one at random for this process
    if (isNotEmptyString(process.env.OPENAI_API_KEY_ARR)) {
      const OPENAI_API_KEY_ARR = JSON.parse(process.env.OPENAI_API_KEY_ARR)
      const randomIndex = Math.floor(Math.random() * OPENAI_API_KEY_ARR.length)
      randomApiKey = OPENAI_API_KEY_ARR[randomIndex]
    }

    const options: ChatGPTAPIOptions = {
      apiKey: randomApiKey,
      completionParams: { model },
      debug: !disableDebug,
    }

    // Increase the max token limit when a gpt-4 model is used
    if (model.toLowerCase().includes('gpt-4')) {
      // 32k context variant
      if (model.toLowerCase().includes('32k')) {
        options.maxModelTokens = 32768
        options.maxResponseTokens = 8192
      }
      else {
        options.maxModelTokens = 8192
        options.maxResponseTokens = 2048
      }
    }

    if (isNotEmptyString(OPENAI_API_BASE_URL))
      options.apiBaseUrl = `${OPENAI_API_BASE_URL}/v1`

    setupProxy(options)

    api = new ChatGPTAPI({ ...options })
    apiModel = 'ChatGPTAPI'
  }
  else {
    // Note: this logs the raw access token; consider removing it in production
    console.log('OPENAI_ACCESS_TOKEN', process.env.OPENAI_ACCESS_TOKEN)
    const options: ChatGPTUnofficialProxyAPIOptions = {
      accessToken: process.env.OPENAI_ACCESS_TOKEN,
      apiReverseProxyUrl: isNotEmptyString(process.env.API_REVERSE_PROXY) ? process.env.API_REVERSE_PROXY : 'https://ai.fakeopen.com/api/conversation',
      model,
      debug: !disableDebug,
    }

    setupProxy(options)

    api = new ChatGPTUnofficialProxyAPI({ ...options })
    apiModel = 'ChatGPTUnofficialProxyAPI'
  }
})()
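// Assumed key-pool format (implied by the JSON.parse above; the value is a placeholder):
//   OPENAI_API_KEY_ARR='["sk-aaa...", "sk-bbb..."]'
// One key is drawn at random when the process boots; fetchUsage() draws again per call.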

async function chatReplyProcess(options: RequestOptions) {
  const { message, lastContext, process, systemMessage, temperature, top_p } = options
  try {
    // Shadows the parameter above, which has already been destructured
    let options: SendMessageOptions = { timeoutMs }

    if (apiModel === 'ChatGPTAPI') {
      if (isNotEmptyString(systemMessage))
        options.systemMessage = systemMessage
      options.completionParams = { model, temperature, top_p }
    }

    if (lastContext != null) {
      if (apiModel === 'ChatGPTAPI')
        options.parentMessageId = lastContext.parentMessageId
      else
        options = { ...lastContext }
    }

    const response = await api.sendMessage(message, {
      ...options,
      // `process` is the caller's progress callback (it shadows Node's global here)
      onProgress: (partialResponse) => {
        process?.(partialResponse)
      },
    })

    return sendResponse({ type: 'Success', data: response })
  }
  catch (error: any) {
    const code = error.statusCode
    global.console.log(error)
    if (Reflect.has(ErrorCodeMessage, code))
      return sendResponse({ type: 'Fail', message: ErrorCodeMessage[code] })
    return sendResponse({ type: 'Fail', message: error.message ?? 'Please check the back-end console' })
  }
}
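// Usage sketch (illustrative only; the router and the req/res objects are
// assumptions, not part of this commit). An Express-style handler could stream
// partial replies through the `process` callback:
//
//   router.post('/chat-process', async (req, res) => {
//     const { prompt, options: lastContext, systemMessage } = req.body
//     await chatReplyProcess({
//       message: prompt,
//       lastContext,
//       systemMessage,
//       process: chat => res.write(`${JSON.stringify(chat)}\n`),
//     })
//     res.end()
//   })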

async function fetchUsage() {
  let OPENAI_API_KEY = process.env.OPENAI_API_KEY
  const OPENAI_API_BASE_URL = process.env.OPENAI_API_BASE_URL

  if (isNotEmptyString(process.env.OPENAI_API_KEY_ARR)) {
    const OPENAI_API_KEY_ARR = JSON.parse(process.env.OPENAI_API_KEY_ARR)
    const randomIndex = Math.floor(Math.random() * OPENAI_API_KEY_ARR.length)
    OPENAI_API_KEY = OPENAI_API_KEY_ARR[randomIndex]
  }

  if (!isNotEmptyString(OPENAI_API_KEY))
    return Promise.resolve('-')

  const API_BASE_URL = isNotEmptyString(OPENAI_API_BASE_URL)
    ? OPENAI_API_BASE_URL
    : 'https://api.openai.com'

  const [startDate, endDate] = formatDate()

  // Usage for the current month
  const urlUsage = `${API_BASE_URL}/v1/dashboard/billing/usage?start_date=${startDate}&end_date=${endDate}`

  const headers = {
    'Authorization': `Bearer ${OPENAI_API_KEY}`,
    'Content-Type': 'application/json',
  }

  const options = {} as SetProxyOptions

  setupProxy(options)

  try {
    // Fetch the amount already used
    const useResponse = await options.fetch(urlUsage, { headers })
    if (!useResponse.ok)
      throw new Error('Failed to fetch usage')
    const usageData = await useResponse.json() as UsageResponse
    const usage = Math.round(usageData.total_usage) / 100
    return Promise.resolve(usage ? `$${usage}` : '-')
  }
  catch (error) {
    global.console.log(error)
    return Promise.resolve('-')
  }
}
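// Worked example, assuming the billing endpoint reports total_usage in cents
// (which the division by 100 above implies): total_usage = 1234 yields
// Math.round(1234) / 100 = 12.34, rendered as '$12.34'.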

function formatDate(): string[] {
  const today = new Date()
  const year = today.getFullYear()
  const month = today.getMonth() + 1
  const lastDay = new Date(year, month, 0)
  const formattedFirstDay = `${year}-${month.toString().padStart(2, '0')}-01`
  const formattedLastDay = `${year}-${month.toString().padStart(2, '0')}-${lastDay.getDate().toString().padStart(2, '0')}`
  return [formattedFirstDay, formattedLastDay]
}
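// Example: called on any day in May 2023, this returns ['2023-05-01', '2023-05-31'];
// new Date(2023, 5, 0) resolves to the last day of May, since day 0 of the
// following month is the previous month's final day.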

async function chatConfig() {
  const usage = await fetchUsage()
  const reverseProxy = process.env.API_REVERSE_PROXY ?? '-'
  const httpsProxy = (process.env.HTTPS_PROXY || process.env.ALL_PROXY) ?? '-'
  const socksProxy = (process.env.SOCKS_PROXY_HOST && process.env.SOCKS_PROXY_PORT)
    ? (`${process.env.SOCKS_PROXY_HOST}:${process.env.SOCKS_PROXY_PORT}`)
    : '-'
  return sendResponse<ModelConfig>({
    type: 'Success',
    data: { apiModel, reverseProxy, timeoutMs, socksProxy, httpsProxy, usage },
  })
}

function setupProxy(options: SetProxyOptions) {
  // Precedence: SOCKS proxy first, then HTTPS_PROXY/ALL_PROXY, then a direct fetch
  if (isNotEmptyString(process.env.SOCKS_PROXY_HOST) && isNotEmptyString(process.env.SOCKS_PROXY_PORT)) {
    const agent = new SocksProxyAgent({
      hostname: process.env.SOCKS_PROXY_HOST,
      port: process.env.SOCKS_PROXY_PORT,
      userId: isNotEmptyString(process.env.SOCKS_PROXY_USERNAME) ? process.env.SOCKS_PROXY_USERNAME : undefined,
      password: isNotEmptyString(process.env.SOCKS_PROXY_PASSWORD) ? process.env.SOCKS_PROXY_PASSWORD : undefined,
    })
    options.fetch = (url, options) => {
      return fetch(url, { agent, ...options })
    }
  }
  else if (isNotEmptyString(process.env.HTTPS_PROXY) || isNotEmptyString(process.env.ALL_PROXY)) {
    const httpsProxy = process.env.HTTPS_PROXY || process.env.ALL_PROXY
    if (httpsProxy) {
      const agent = new HttpsProxyAgent(httpsProxy)
      options.fetch = (url, options) => {
        return fetch(url, { agent, ...options })
      }
    }
  }
  else {
    options.fetch = (url, options) => {
      return fetch(url, { ...options })
    }
  }
}
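// Example configuration (placeholder values): SOCKS_PROXY_HOST=127.0.0.1 with
// SOCKS_PROXY_PORT=1080 routes every outbound request through the SOCKS agent;
// HTTPS_PROXY=http://127.0.0.1:7890 applies only when no SOCKS settings exist.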

function currentModel(): ApiModel {
  return apiModel
}

export type { ChatContext, ChatMessage }

export { chatReplyProcess, chatConfig, currentModel }
service_src_chatgpt_types.ts ADDED
@@ -0,0 +1,19 @@
import type { ChatMessage } from 'chatgpt'
import type fetch from 'node-fetch'

export interface RequestOptions {
  message: string
  lastContext?: { conversationId?: string; parentMessageId?: string }
  process?: (chat: ChatMessage) => void
  systemMessage?: string
  temperature?: number
  top_p?: number
}

export interface SetProxyOptions {
  fetch?: typeof fetch
}

export interface UsageResponse {
  total_usage: number
}
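For reference, a minimal sketch of how these types fit together. The values are placeholders, and the import path assumes the './types' layout that index.ts uses:

import type { RequestOptions } from './types'

// A fully populated request; every field except message is optional
const request: RequestOptions = {
  message: 'Hello!',
  systemMessage: 'You are a helpful assistant.',
  temperature: 0.8,
  top_p: 1,
  // Resume a previous thread (the id is a placeholder)
  lastContext: { parentMessageId: 'prev-message-id' },
  // Invoked with each partial ChatMessage while the reply streams
  process: chat => console.log(chat.text),
}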