hugforziio committed
Commit 90fe6c7 · 1 Parent(s): 2c6abb3

Create functions.py

Files changed (1): functions.py +552 -0
functions.py ADDED
@@ -0,0 +1,552 @@
# import gradio as gr
import gradio
# import lmdb
# import base64
# import io
import random
import time
import os
import re
import json
import copy
# import sqlite3
import hashlib
import uuid
from urllib.parse import urljoin
import openai

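# Helper functions for a Gradio-based ChatGPT UI: JSON utilities, chat-history
# filtering and Markdown rendering, history export to temp files, user-message
# templating, and wrappers around openai.ChatCompletion for both streaming and
# non-streaming requests.
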
def get_random_sleep(base_time, random_range):
    # base_time and random_range are in milliseconds; the result is in seconds.
    return (base_time + random.randint(-random_range, random_range)) * 0.001


def js_load(txt):
    # json.loads with error logging; returns None on failure.
    try:
        return json.loads(txt)
    except Exception as error:
        print('')
        print('js_load:')
        print(str(error))
        print('')
        return None


def js_dump(thing):
    # json.dumps with error logging; returns None on failure.
    try:
        return json.dumps(thing)
    except Exception as error:
        print('')
        print('js_dump:')
        print(str(error))
        print('')
        return None


def filtered_history(history, num=0):
    # Keep only request/response items, and only the most recent `num` of them.
    # .get() is used because "app" items appended on errors carry no 'type' key.
    if num > 0:
        filtered = list(filter(lambda it: (it.get('type') in ['request', 'response']), history))
        return filtered[-num:]
    return []


def filtered_history_messages(history, num=0):
    # Reduce history items to the {role, content} shape expected by the API.
    filtered = filtered_history(history, num)
    return list(map(lambda it: {'role': it.get('role'), 'content': it.get('content')}, filtered))

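# Illustrative example (values are made up): given
#   history = [
#       {'role': 'user', 'content': 'hi', 'type': 'request'},
#       {'role': 'assistant', 'content': 'hello', 'type': 'response'},
#       {'role': 'app', 'content': 'some tip'},
#   ]
# filtered_history_messages(history, num=2) keeps the last two request/response
# items and drops the extra keys:
#   [{'role': 'user', 'content': 'hi'}, {'role': 'assistant', 'content': 'hello'}]
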
def make_md_line(role, content):
    return f"""\n##### `{role}`\n\n{content}\n"""


def make_md_by_history(history):
    # Render the whole history as Markdown, one section per message.
    md = ""
    for item in history:
        md += make_md_line(item.get('role'), item.get('content'))
    return md

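# Illustrative example: make_md_line('user', 'hi') produces
#
#   ##### `user`
#
#   hi
#
# so make_md_by_history concatenates one such section per history item.
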
def make_history_file_fn(history):

    uuid4 = str(uuid.uuid4())
    json_file_path = None
    md_file_path = None

    try:
        # Create the directory if it does not exist.
        os.makedirs('temp_files', exist_ok=True)

        json_file_content = json.dumps(history)
        json_file_path = os.path.join('temp_files', f'history[{uuid4}].json')
        with open(json_file_path, 'w') as f:
            f.write(json_file_content)

        md_file_content = make_md_by_history(history)
        md_file_path = os.path.join('temp_files', f'history[{uuid4}].md')
        with open(md_file_path, 'w') as f:
            f.write(md_file_content)

        return json_file_path, md_file_path, gradio.update(visible=True)

    except Exception as error:
        print(f"\n{error}\n")

    # On failure, return whatever paths were written (None otherwise) so the
    # file row is still shown.
    return json_file_path, md_file_path, gradio.update(visible=True)

def make_history_file_fn__(history):
    # Earlier variant of make_history_file_fn; unlike the version above it does
    # not create the temp_files directory first.
    uuid4 = str(uuid.uuid4())
    try:
        json_file_content = json.dumps(history)
        json_file_path = os.path.join('temp_files', f'history[{uuid4}].json')
        with open(json_file_path, 'w') as f:
            f.write(json_file_content)
    except Exception as error:
        print(f"\n{error}\n")
        json_file_path = None
    try:
        md_file_content = make_md_by_history(history)
        md_file_path = os.path.join('temp_files', f'history[{uuid4}].md')
        with open(md_file_path, 'w') as f:
            f.write(md_file_content)
    except Exception as error:
        print(f"\n{error}\n")
        md_file_path = None

    return json_file_path, md_file_path, gradio.update(visible=True)

def make_user_message_list_fn__(
    user_message_template,  # template applied to every message
    user_message_template_mask,  # the part of the template to be replaced
    user_message_template_mask_is_regex,  # whether the mask is a regular expression
    user_message_list_text,  # a single text blob containing all user messages
    user_message_list_text_splitter,  # the delimiter used to split user_message_list_text
    user_message_list_text_splitter_is_regex,  # whether the splitter is a regular expression
) -> list:
    # Returns the list of user messages with the template applied.
    # This implementation first splits the user-message text into a list of
    # messages, using a regex or a plain string as requested. Then, for each
    # message, it substitutes the message into the template according to
    # user_message_template_mask and user_message_template_mask_is_regex, and
    # appends the result to the output list, which is returned at the end.

    # Split the user-message list text.
    if user_message_list_text_splitter_is_regex:
        user_messages = re.split(user_message_list_text_splitter, user_message_list_text)
    else:
        user_messages = user_message_list_text.split(user_message_list_text_splitter)

    # Build the list of templated user messages.
    user_message_result_list = []
    for message in user_messages:
        # Substitute the message into the template.
        if user_message_template_mask_is_regex:
            transformed_message = re.sub(user_message_template_mask, message, user_message_template)
        else:
            transformed_message = user_message_template.replace(user_message_template_mask, message)

        user_message_result_list.append(transformed_message)

    return user_message_result_list

def make_user_message_list_fn(
    user_message_template,
    user_message_template_mask,
    user_message_template_mask_is_regex,
    user_message_list_text,
    user_message_list_text_splitter,
    user_message_list_text_splitter_is_regex,
) -> list:

    # Same as the variant above, but the regex parameters are compiled once
    # before use: whenever an xxx_is_regex flag is True, the corresponding
    # parameter is compiled into a pattern object, and the substitution and
    # split operations then use the compiled pattern's methods.

    # Compile the regular expressions.
    if user_message_template_mask_is_regex:
        user_message_template_mask = re.compile(user_message_template_mask)

    if user_message_list_text_splitter_is_regex:
        user_message_list_text_splitter = re.compile(user_message_list_text_splitter)

    # Split the user-message list text.
    if user_message_list_text_splitter_is_regex:
        user_messages = user_message_list_text_splitter.split(user_message_list_text)
    else:
        user_messages = user_message_list_text.split(user_message_list_text_splitter)

    # Build the list of templated user messages.
    user_message_result_list = []
    for message in user_messages:
        # Substitute the message into the template. Note that with a regex
        # mask, backslashes in `message` are interpreted as escape sequences
        # by Pattern.sub.
        if user_message_template_mask_is_regex:
            transformed_message = user_message_template_mask.sub(message, user_message_template)
        else:
            transformed_message = user_message_template.replace(user_message_template_mask, message)

        user_message_result_list.append(transformed_message)

    return user_message_result_list

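# Illustrative example (values are made up): applying a template to a
# "---"-separated batch of inputs, with plain-string mask and splitter:
#
#   make_user_message_list_fn(
#       'Translate to French: "{TEXT}"',  # user_message_template
#       '{TEXT}',                         # mask (plain string)
#       False,
#       'hello\n---\nworld',              # two messages in one text blob
#       '\n---\n',                        # splitter (plain string)
#       False,
#   )
#   # -> ['Translate to French: "hello"', 'Translate to French: "world"']
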
def sequential_chat_once_fn(payload, api_key_text, history, history_md_stable, history_md_stream, tips):
    print("\n\n")

    assistant_message = ""
    tips = ""

    try:
        openai.api_key = api_key_text
        completion = openai.ChatCompletion.create(**payload)

        if payload.get('stream'):
            is_first = True
            for chunk in completion:
                # The first chunk only carries the role, not content; skip it.
                if is_first:
                    is_first = False
                    continue
                if chunk.choices[0].finish_reason is None:
                    print(chunk.choices[0].delta.content or '')
                    assistant_message += chunk.choices[0].delta.content or ''
                    history_md_stream = make_md_line('assistant', assistant_message)
                    tips = 'streaming'
                    yield assistant_message, history_md_stream, tips, history
        else:
            assistant_message = completion.choices[0].message.content
            history_md_stream = make_md_line('assistant', assistant_message)
            tips = 'got'
            print(assistant_message)
            yield assistant_message, history_md_stream, tips, history

    except Exception as error:
        tips = str(error)
        history.append({"role": "app", "content": tips})
        print(f"\n{tips}\n")
        yield assistant_message, history_md_stream, tips, history

    print("\n\n")

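# Illustrative sketch (values are made up): the minimal payload shape this
# generator expects, matching the legacy openai.ChatCompletion API used here
# (openai-python < 1.0):
#
#   payload = {
#       'model': 'gpt-3.5-turbo',
#       'messages': [{'role': 'user', 'content': 'Hello!'}],
#       'stream': True,  # False yields a single complete message instead
#   }
#   for assistant_message, md, tips, history in sequential_chat_once_fn(
#           payload, 'sk-...', [], '', '', ''):
#       ...
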
def sequential_chat_fn(
    history,

    system_prompt_enabled,
    system_prompt,
    user_message_template,
    user_message_template_mask,
    user_message_template_mask_is_regex,
    user_message_list_text,
    user_message_list_text_splitter,
    user_message_list_text_splitter_is_regex,
    history_prompt_num,

    api_key_text, token_text,

    sleep_base, sleep_rand,

    prop_stream, prop_model, prop_temperature, prop_top_p, prop_choices_num, prop_max_tokens, prop_presence_penalty, prop_frequency_penalty, prop_logit_bias,
):
    # outputs=[
    #     history,
    #     history_md_stable,
    #     history_md_stream,
    #     tips,
    #     file_row,
    # ],

    history_md_stable = ""
    history_md_stream = ""
    tips = ""

    try:

        user_message_list = make_user_message_list_fn(
            user_message_template,
            user_message_template_mask,
            user_message_template_mask_is_regex,
            user_message_list_text,
            user_message_list_text_splitter,
            user_message_list_text_splitter_is_regex,
        )

        payload = {
            'model': prop_model,
            'temperature': prop_temperature,
            'top_p': prop_top_p,
            'n': prop_choices_num,
            'stream': prop_stream,
            'presence_penalty': prop_presence_penalty,
            'frequency_penalty': prop_frequency_penalty,
            'user': token_text,
        }
        if prop_max_tokens > 0:
            payload['max_tokens'] = prop_max_tokens
        # if prop_logit_bias is not None:
        #     payload['logit_bias'] = prop_logit_bias
        # headers = {
        #     "Content-Type": "application/json",
        #     "Authorization": f"Bearer {api_key_text}"
        # }

        for user_message in user_message_list:
            print('')
            print(user_message)
            print('')
            # Build the messages to send: optional system prompt, then the most
            # recent history entries, then the current user message.
            the_messages = []
            if system_prompt_enabled:
                the_messages.append({"role": "system", "content": system_prompt})
            for msg in filtered_history_messages(history, num=history_prompt_num):
                the_messages.append(msg)
            the_messages.append({"role": "user", "content": user_message})
            payload['messages'] = the_messages

            history.append({"role": "user", "content": user_message, "type": "request", "payload": payload})

            history_md_stable = make_md_by_history(history)
            history_md_stream = ""
            tips = ""
            yield history, history_md_stable, history_md_stream, tips, gradio.update(visible=False)

            try:
                assistant_message = ""
                for (assistant_message, history_md_stream, tips, history) in sequential_chat_once_fn(payload, api_key_text, history, history_md_stable, history_md_stream, tips):

                    yield history, history_md_stable, history_md_stream, tips, gradio.update()

                # Tag the assistant message as a "response" so that
                # filtered_history includes it in later prompts.
                history.append({"role": "assistant", "content": assistant_message, "type": "response"})
                history_md_stable += history_md_stream
                history_md_stream = ""
                tips = "fine"
                yield history, history_md_stable, history_md_stream, tips, gradio.update(visible=False)

            except Exception as error:
                tips = f'error: {str(error)}'
                history.append({"role": "app", "content": tips})
                print(f"\n{tips}\n")
                yield history, history_md_stable, history_md_stream, tips, gradio.update(visible=False)

            time.sleep(get_random_sleep(sleep_base, sleep_rand))

    except Exception as error:
        tips = str(error)
        history.append({"role": "app", "content": tips})
        print(f"\n{tips}\n")
        yield history, history_md_stable, history_md_stream, tips, gradio.update(visible=False)

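# Illustrative sketch of how this generator might be wired to a button in the
# UI (component names here are hypothetical, not from this file):
#
#   send_btn.click(
#       fn=sequential_chat_fn,
#       inputs=[history, system_prompt_enabled, system_prompt, ...],
#       outputs=[history, history_md_stable, history_md_stream, tips, file_row],
#   )
#
# Each yield above updates those five outputs; Gradio streams successive
# yields from generator callbacks to the UI.
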
def on_click_send_btn(
    global_state_json, api_key_text, chat_input_role, chat_input, prompt_table, chat_use_prompt, chat_use_history, chat_log,
    chat_model, temperature, top_p, choices_num, stream, max_tokens, presence_penalty, frequency_penalty, logit_bias,
):

    old_state = json.loads(global_state_json or "{}")

    print('\n\n\n\n\n')
    print(prompt_table)
    prompt_table = prompt_table or []

    chat_log = chat_log or []

    def render_chat_log_md():
        # Render the prompt table and the chat log as Markdown sections; this
        # block was repeated three times in the function, so it is factored out.
        md = ''
        if chat_use_prompt:
            md += '<center>(prompt)</center>\n\n'
            md += "\n".join([f"##### `{it[0]}`\n\n{it[1]}\n\n" for it in prompt_table])
            md += '\n---\n'
        md += '<center>(history)</center>\n\n' if chat_use_history else '<center>(not used history)</center>\n\n'
        md += "\n".join([f"##### `{it[0]}`\n\n{it[1]}\n\n" for it in chat_log])
        md += '\n---\n'
        return md

    chat_log_md = render_chat_log_md()

    # if chat_input=='':
    #     return json.dumps(old_state), chat_log, chat_log_md, chat_log_md, None, None, chat_input

    print('\n')
    print(chat_input)
    print('')

    try:
        # Validate that logit_bias is JSON-serializable before sending it.
        logit_bias_json = json.dumps(logit_bias) if logit_bias else None
    except Exception:
        # This function is a generator (it yields in the stream branch), so
        # results must be yielded rather than returned.
        yield json.dumps(old_state), chat_log, chat_log_md, chat_log_md, None, None, chat_input
        return

    new_state = copy.deepcopy(old_state) or {}

    req_hist = copy.deepcopy(prompt_table) if chat_use_prompt else []

    if chat_use_history:
        for hh in (chat_log or []):
            req_hist.append(hh)

    if chat_input and chat_input != "":
        req_hist.append([(chat_input_role or 'user'), chat_input])

    openai.api_key = api_key_text

    props = {
        'model': chat_model,
        'messages': [{'role': it[0], 'content': it[1]} for it in req_hist],
        'temperature': temperature,
        'top_p': top_p,
        'n': choices_num,
        'stream': stream,
        'presence_penalty': presence_penalty,
        'frequency_penalty': frequency_penalty,
    }
    if max_tokens > 0:
        props['max_tokens'] = max_tokens
    if logit_bias_json is not None:
        # The API expects the mapping itself, not its JSON string form.
        props['logit_bias'] = logit_bias

    props_json = json.dumps(props)

    try:
        completion = openai.ChatCompletion.create(**props)
        print('')

        chat_log_md = render_chat_log_md()

        if chat_input and chat_input != "":
            chat_log.append([(chat_input_role or 'user'), chat_input])
            chat_log_md += f"##### `{(chat_input_role or 'user')}`\n\n{chat_input}\n\n"

        counter = 0

        if stream:
            the_response = ''
            the_response_role = ''
            for chunk in completion:
                # The first chunk carries the role, not content; skip it.
                if counter == 0:
                    the_response_role = chunk.choices[0].delta.role
                    chat_log_md += f"##### `{the_response_role}`\n\n"
                    counter += 1
                    continue
                if chunk.choices[0].finish_reason is None:
                    the_response_chunk = chunk.choices[0].delta.content or ''
                    the_response += the_response_chunk
                    chat_log_md += f"{the_response_chunk}"
                    yield json.dumps(new_state), chat_log, chat_log_md, chat_log_md, "{}", props_json, ''
                else:
                    chat_log.append([the_response_role, the_response])
                    chat_log_md += f"\n\n"
                    yield json.dumps(new_state), chat_log, chat_log_md, chat_log_md, '{"msg": "response body is not shown in stream mode"}', props_json, ''
        else:
            the_response_role = completion.choices[0].message.role
            the_response = completion.choices[0].message.content
            print(the_response)
            print('')

            chat_log.append([the_response_role, the_response])
            chat_log_md += f"##### `{the_response_role}`\n\n{the_response}\n\n"

            # Serialize the raw response, hiding credentials before display.
            chat_last_resp = json.dumps(completion.__dict__)
            chat_last_resp_dict = json.loads(chat_last_resp)
            chat_last_resp_dict['api_key'] = "hidden by UI"
            chat_last_resp_dict['organization'] = "hidden by UI"
            chat_last_resp = json.dumps(chat_last_resp_dict)

            yield json.dumps(new_state), chat_log, chat_log_md, chat_log_md, chat_last_resp, props_json, ''

    except Exception as error:
        print(error)
        print('error!!!!!!')

        chat_log_md = render_chat_log_md()

        chat_log_md += "\n"
        chat_log_md += str(error)
        yield json.dumps(new_state), chat_log, chat_log_md, chat_log_md, None, props_json, chat_input

def clear_history():
    return [], ""


def copy_history(txt):
    # Intentionally a no-op; kept as a callback placeholder.
    # print('\n\n copying')
    # print(txt)
    # print('\n\n')
    pass


def update_saved_prompt_titles(global_state_json, selected_saved_prompt_title):
    print('')
    global_state = json.loads(global_state_json or "{}")
    print(global_state)
    print(selected_saved_prompt_title)
    saved_prompts = global_state.get('saved_prompts') or []
    print(saved_prompts)
    the_choices = [(it.get('title') or '[untitled]') for it in saved_prompts]
    print(the_choices)
    print('')
    return gradio.Dropdown.update(choices=the_choices)

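# Illustrative example (values are made up): the global_state_json shape that
# update_saved_prompt_titles reads (only 'title' is used here):
#
#   '{"saved_prompts": [{"title": "Translate"}, {"title": "Summarize"}]}'
#
# would populate the dropdown with those two titles; entries without a title
# show up as '[untitled]'.
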
def save_prompt(global_state_json, saved_prompts, prompt_table, prompt_title=None, **_):
    pass


def save_prompt(global_state_json, saved_prompts, prompt_title, prompt_table):
    # Note: this only reads saved_prompts back from the global state; it does
    # not yet persist the new prompt_title/prompt_table pair.
    the_choices = []
    global_state = json.loads(global_state_json or "{}")
    saved_prompts = global_state.get('saved_prompts') or []
    if len(saved_prompts):
        the_choices = [it.get('title') or '[untitled]' for it in saved_prompts]
    return global_state_json, gradio.Dropdown.update(choices=the_choices, value=prompt_title), prompt_title, prompt_table


def load_saved_prompt(title):
    # Not implemented yet.
    pass