Omnibus committed
Commit 126dfaf · verified · Parent(s): 0670bc7

Update app.py

Files changed (1)
  1. app.py +523 -2
app.py CHANGED
@@ -151,6 +151,526 @@ def compress_data(c,purpose, task, history):
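+# --- New pipeline: chunked keyword extraction, news summarization, and RSS ingestion ---
+# get_records streams the raw feed data through run_gpt in MAX_DATA-sized chunks,
+# carrying each response forward as context for the next chunk.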
+def get_records(inp,data):
+    key_box=[]
+    seed=random.randint(1,1000000000)
+    print(inp)
+    out = str(data)
+    rl = len(out)
+    print(f'rl:: {rl}')
+    # count separator characters as a rough size estimate for chunking
+    c=1
+    for i in str(out):
+        if i == " " or i=="," or i=="\n" or i=="/" or i=="." or i=="<":
+            c +=1
+    print(f'c:: {c}')
+    divr=int(c)/MAX_DATA
+    divi=int(divr)+1 if divr != int(divr) else int(divr)
+    chunk = int(int(c)/divr)
+    print(f'chunk:: {chunk}')
+    print(f'divr:: {divr}')
+    print(f'divi:: {divi}')
+    s=0
+    e=chunk
+    print(f'e:: {e}')
+    new_history=""
+    #task = f'Compile this data to fulfill the task: {task}, and complete the purpose: {purpose}\n'
+    for z in range(divi):
+        print(f's:e :: {s}:{e}')
+        hist = out[s:e]
+        print(f'hist::\n{hist}')
+        resp = run_gpt(
+            GET_KEYWORD,
+            stop_tokens=[],
+            max_tokens=2048,
+            seed=seed,
+            purpose=inp,
+            prefix_tog="alternate",
+            task=inp,
+            knowledge=new_history,
+            history=hist,
+        ).strip("\n")
+        new_history = resp
+        print(f'resp {z}::\n{resp}')
+        #out+=resp
+        e=e+chunk
+        s=s+chunk
+        yield "", [(inp,new_history)]
+
+
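+# get_key asks the model for a single keyword, then filters the loaded articles
+# by checking the keyword against each item's title, description, and link.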
+def get_key(inp,data):
+    key_box=[]
+    seed=random.randint(1,1000000000)
+    key_w = run_gpt(
+        GET_KEYWORD,
+        stop_tokens=[],
+        max_tokens=56,
+        seed=seed,
+        purpose=inp,
+        prefix_tog="normal",
+        task=inp,
+    ).split("<")[0]
+    print(f'key_w::{key_w}')
+    if " " in key_w:
+        key_w=key_w.split(" ")[-1]
+    for i,ba in enumerate(data):
+        each_key=data[i].keys()
+        print(each_key)
+        # iterate over the articles stored under the feed's first key
+        for z,ea in enumerate(data[i][list(each_key)[0]]):
+            #for f,ff in enumerate(data[i][zz]):
+            try:
+                if ea['title'] and key_w in ea['title']:
+                    key_box.append(ea)
+                elif ea['description'] and key_w in ea['description']:
+                    key_box.append(ea)
+                elif ea['link'] and key_w in ea['link']:
+                    key_box.append(ea)
+            except Exception as e:
+                print(e)
+    print(key_box)
+
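+# System prompt for the summarizer: the model rewrites each data chunk into
+# JSON-formatted news articles that can be loaded back into an RSS reader.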
+NEWS_REPORTER="""You are an Expert News Aggregator. Your duty is to compress all of the News Articles you are given into 10 or more individual articles that capture the full context of the current news. Compile your articles into JSON format which the user will load into an RSS reader for other users to read.
+Add NEW DATA that you receive to your CURRENT DATA by combining and reformatting when needed.
+Output Format:
+"title": "title of the first article",
+"description": "description of the article",
+"article": "your custom written article",
+"links": "all source links that have contributed to the article",
+News Articles:
+{new_data}
+"""
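+# summarize splits the loaded feed data into chunks, streams each chunk through
+# the NEWS_REPORTER prompt, and yields partial results to the chatbot as they arrive.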
+def summarize(inp,history,data=None):
+    json_box=[]
+    if inp == "":
+        inp = "Process this data"
+    #inp = format_prompt(inp,history)
+    task = "Compile a detailed report"
+    history.clear()
+    yield "",[(inp,"Working on it...")],None
+    if data is not None and data != "Error" and data != "":
+        timestamp=datetime.datetime.now()
+        seed=random.randint(1,1000000000)
+        print(seed)
+        generate_kwargs = dict(
+            temperature=0.9,
+            max_new_tokens=10240,
+            top_p=0.95,
+            repetition_penalty=1.0,
+            do_sample=True,
+            seed=seed,
+        )
+        out = str(data)
+        rl = len(out)
+        print(f'rl:: {rl}')
+        # count separator characters as a rough size estimate for chunking
+        c=1
+        for i in str(out):
+            if i == " " or i=="," or i=="\n" or i=="/" or i=="." or i=="<":
+                c +=1
+        print(f'c:: {c}')
+        divr=int(c)/MAX_DATA
+        divi=int(divr)+1 if divr != int(divr) else int(divr)
+        chunk = int(int(c)/divr)
+        print(f'chunk:: {chunk}')
+        print(f'divr:: {divr}')
+        print(f'divi:: {divi}')
+        s=0
+        e=chunk
+        print(f'e:: {e}')
+        out_box =[]
+        resp = ""
+        for z in range(divi):
+            print(f's:e :: {s}:{e}')
+            mes= f'Working on data chunk: {s}:{e}'
+            new_data = out[s:e]
+            #yield "", [(inp,f'{mes}\n{new_history}')]
+            content = NEWS_REPORTER.format(new_data=str(new_data.replace("{","").replace("}","")))
+            stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
+            for response in stream:
+                resp += response.token.text
+                yield "", [(inp,resp)],None
+            #for line in resp.split("\n"):
+                #if 'title:' in line.lower():
+            #out_json=""
+            out_json=resp.replace("\n","").replace("```","")
+            # parse the model's JSON-like text into Python objects before collecting it
+            out_box.append(eval(out_json.strip("</s>")))
+            print("ADDING")
+            yield "", [(inp,resp)],out_box
+            e=e+chunk
+            s=s+chunk
+        #history = "preliminary result: {}\n".format(resp)
+        #yield "", (inp,f'{mes}\n{history}')
+        #print ("final" + resp)
+        #out_hist = "result:\n{}".format(resp)
+        #return history
+        #yield "", [(inp,out_hist)]
+        #out = str(out_hist)
+        #rawp = out
+    else:
+        rawp = "Provide a valid data source"
+        history.append((inp,rawp))
+        yield "", history,None
+
+
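+# find_rss walks every feed in valid_feeds.json, parses it as JSON or XML,
+# extracts noun phrases with TextBlob, and uploads the combined result to the dataset repo.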
+def find_rss():
+    timestamp=str(datetime.datetime.now())
+    timename=timestamp.replace(" ","--").replace(":","-").replace(".","-")
+    error_box=[]
+    error_box_schema={"Name":"","Error":"","Keys":"","Other":""}
+    lod=""
+    out_box=[]
+    valid_box=[]
+    yield [],[(None,"loading sources")],None
+    with open('valid_feeds.json','r') as j:
+        cont = json.loads(j.read())
+        #print(cont)
+    j.close()
+    for ea in cont:
+        try:
+            #lod=""
+            #print(ea['link'])
+            if ea.get('link') is not None:
+                rss_url=ea['link']
+            else:
+                rss_url=ea['URL']
+            link_box=[]
+            r = requests.get(f'{rss_url}')
+            if r.status_code == 200:
+                try:
+                    # pick a parser from the URL extension; fall back to XML
+                    if ".json" in rss_url:
+                        lod = json.loads(r.text)
+                    elif ".xml" in rss_url:
+                        lod = xmltodict.parse(r.content)
+                    elif ".rss" in rss_url:
+                        lod = xmltodict.parse(r.content)
+                    else:
+                        try:
+                            lod = xmltodict.parse(r.content)
+                        except Exception as e:
+                            lod=f'{rss_url} ::ERROR:: {e}'
+                            error_box.append({"Name":rss_url,"Error":e,"Error Code":1})
+                    if ea.get('section') is not None:
+                        section = ea['section']
+                    else: section = ""
+                    valid_box.append({"source":ea['source'],"link":ea['link'],"section":section,"description":''})
+                except Exception as e:
+                    lod=f'{rss_url} ::ERROR:: {e}'
+                    error_box.append({"Name":rss_url,"Error":e,"Error Code":2})
+            else:
+                lod = f'{rss_url} ::ERROR::COULD NOT CONNECT:: {r.status_code}'
+                error_box.append({"Name":rss_url,"Error":f'Status Code:{r.status_code}',"Error Code":3})
+                pass
+            try:
+                #print(lod['rss']['channel']['item'][0].keys())
+                #print(lod['rss'].keys())
+                print("##############")
+                #print(lod['rss'].keys())
+                print("##############")
+                for i,ea in enumerate(lod['rss']['channel']['item']):
+                    try:
+                        r_link = ea['link']
+                        if ea.get('title') != None:
+                            r_title = ea['title']
+                        else: r_title= ea['source']
+                        tt = TextBlob(r_title)
+                        #tt_nouns=tt.tags
+                        tt_phrases=tt.noun_phrases
+                        if ea.get('description') != None:
+                            r_description = ea['description']
+                        else: r_description = [lod['rss']['channel']['item']]
+                        td = TextBlob(str(r_description))
+                        #td_nouns=td.tags
+                        td_phrases=td.noun_phrases
+                        #tdd_nouns=[x for x in td_nouns if x[1]=='NN' or x[1]=='NNP']
+                        #ttt_nouns=[x1 for x1 in tt_nouns if x1[1]=='NN' or x1[1]=='NNP']
+                        #nouns=ttt_nouns+tdd_nouns
+                        phrases=tt_phrases+td_phrases
+                        lods = {"title":r_title, "description":r_description,"link":r_link, "noun_phrases":phrases}
+                    except Exception as e:
+                        print(f"Exception::{ea}")
+                        error_box.append({"Name":rss_url,"Keys":lod['rss']['channel']['item'],"Error":e,"Error Code":4})
+                        print(e)
+                        #lods = {"title":"ERROR", "description":{e},"link":"ERROR"}
+                        # skip this item so an unparsed entry is never appended
+                        continue
+                    """
+                    r_link = lod['rss']['channel']['item'][i]['link']
+                    r_title = lod['rss']['channel']['item'][i]['title']
+                    r_description = lod['rss']['channel']['item'][i]['description']"""
+                    link_box.append(lods)
+                lod={lod['rss']['channel']['title']:link_box}
+                out_box.append(lod)
+            except Exception as e:
+                error_box.append({"Name":rss_url,"Error":e,"Error Code":5})
+                print(f'Exception::{e}')
+        except Exception as e:
+            error_box.append({"Name":rss_url,"Error":e,"Error Code":6})
+            print(f'Exception::{e}')
+            pass
+    '''
+    json_object_valid = json.dumps(valid_box, indent=4)
+    with open("tmp3.json", "w") as outfile3:
+        outfile3.write(json_object_valid)
+    api.upload_file(
+        path_or_fileobj="tmp3.json",
+        path_in_repo=f"/rss/valid-{timename}.json",
+        repo_id=reponame,
+        #repo_id=save_data.split('datasets/',1)[1].split('/raw',1)[0],
+        token=token_self,
+        repo_type="dataset",
+    )
+    yield out_box,[(None,'')],error_box
+    '''
+    print("DONE")
+    json_object = json.dumps(out_box, indent=4)
+    #json_object = json.dumps(out_box,indent=4)
+    with open("tmp2.json", "w") as outfile:
+        outfile.write(json_object)
+    api.upload_file(
+        path_or_fileobj="tmp2.json",
+        path_in_repo=f"/rss1/{timename}.json",
+        repo_id=reponame,
+        #repo_id=save_data.split('datasets/',1)[1].split('/raw',1)[0],
+        token=token_self,
+        repo_type="dataset",
+    )
+    yield out_box,[(None,f'Source is current as of:\n{timestamp} UTC\n\nThe current Date and Time is:\n{timestamp} UTC')],error_box
+
+
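+# load_data fetches a single feed when a URL is given; otherwise it loads the
+# most recent snapshot previously uploaded to the dataset repo.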
+def load_data(rss_url=None):
+    timestamp=str(datetime.datetime.now())
+    yield None,[(None,f'Loading data source, please wait')]
+    if rss_url:
+        yield None,[(None,f'Loading data from {rss_url}, please wait')]
+        r = requests.get(f'{rss_url}')
+        if r.status_code == 200:
+            lod={}
+            try:
+                if ".json" in rss_url:
+                    lod = json.loads(r.text)
+                elif ".xml" in rss_url:
+                    lod = xmltodict.parse(r.content)
+                elif ".rss" in rss_url:
+                    lod = xmltodict.parse(r.content)
+                else:
+                    try:
+                        lod = xmltodict.parse(r.content)
+                    except Exception as e:
+                        yield None, [(None, f'{rss_url} ::ERROR:: {e}')]
+            except Exception as e:
+                yield None, [(None, f'{rss_url} ::ERROR:: {e}')]
+            yield lod,[(None,f'Source is current as of:\n{timestamp} UTC')]
+        else:
+            yield None, [(None, f'{rss_url} ::ERROR::COULD NOT CONNECT:: {r.status_code}')]
+    if not rss_url:
+        yield None,[(None,f'Loading data from database, please wait')]
+        f_ist = (api.list_repo_files(repo_id=f'{reponame}', repo_type="dataset"))
+        f_ist.sort(reverse=True)
+        #print(f_ist)
+        r = requests.get(f'{save_data}{f_ist[0]}')
+        lod = json.loads(r.text)
+        filename=f_ist[0].split("/")[1].split(".json")[0].replace("--"," ")
+        print(filename)
+        filename_start = filename.split(" ")[0]
+        filename_end = filename.split(" ")[1]
+        # restore HH:MM:SS.ffffff from the dash-encoded file name
+        filename_end = filename_end.replace("-",":",2).replace("-",".",1)
+        #filename_end_far=filename_end.split(":")[2]
+        print(filename)
+        yield lod,[(None,f'Source is current as of:\n{filename_start} {filename_end} UTC\n\nThe current Date and Time is:\n{timestamp} UTC')]
+
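+# Gradio UI: wires the controls below to get_records, load_data, find_rss, and summarize.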
+with gr.Blocks() as app:
+    cb = gr.Chatbot(height=600, show_share_button=True, show_copy_button=True)
+    with gr.Row():
+        inst = gr.Textbox(label="Instructions")
+        sub_btn=gr.Button("Submit")
+    with gr.Row():
+        rss_custom=gr.Textbox(label="URL for RSS feed (.rss,.xml)")
+        load_btn = gr.Button("Load RSS")
+    with gr.Accordion(open=False):
+        u_btn=gr.Button("Update [RSS Data]")
+        keyw = gr.Button("Use Keyword [Experimental]")
+    with gr.Row():
+        out_json = gr.JSON()
+        #error_box=gr.JSON()
+        error_box=gr.JSON()
+    fil = gr.Textbox()
+    keyw.click(get_records,[inst,out_json],[inst,cb])
+    load_btn.click(load_data,rss_custom,[out_json,cb])
+    u_btn.click(find_rss,None,[out_json,cb,error_box])
+    sub_btn.click(summarize,[inst,cb,out_json],[inst,cb,error_box])
+app.queue(default_concurrency_limit=20).launch()
+
+
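+# The previous version of the module is preserved below inside a triple-quoted
+# string so it stays inert at import time.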
+'''import gradio as gr
+import requests
+import bs4
+import lxml
+import os
+from huggingface_hub import InferenceClient,HfApi
+import random
+import json
+import datetime
+import xmltodict
+from textblob import TextBlob
+os.system("python -m textblob.download_corpora")
+
+
+from prompts import (
+    GET_KEYWORD,
+    COMPRESS_HISTORY_PROMPT,
+    COMPRESS_DATA_PROMPT,
+    COMPRESS_DATA_PROMPT_SMALL,
+    PREFIX_ALT,
+    PREFIX,
+)
+client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+reponame="Omnibus/tmp"
+save_data=f'https://huggingface.co/datasets/{reponame}/raw/main/'
+token_self = os.environ['HF_TOKEN2']
+api=HfApi(token=token_self)
+
+
+def parse_action(string: str):
+    print("PARSING:")
+    print(string)
+    assert string.startswith("action:")
+    idx = string.find("action_input=")
+    print(idx)
+    if idx == -1:
+        print("idx == -1")
+        print(string[8:])
+        return string[8:], None
+    print("last return:")
+    print(string[8 : idx - 1])
+    print(string[idx + 13 :].strip("'").strip('"'))
+    return string[8 : idx - 1], string[idx + 13 :].strip("'").strip('"')
+
+MAX_HISTORY = 100
+MAX_DATA = 40000
+
+def format_prompt(message, history):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+
+def run_gpt(
+    prompt_template,
+    stop_tokens,
+    max_tokens,
+    seed,
+    purpose,
+    prefix_tog,
+    **prompt_kwargs,
+):
+    timestamp=datetime.datetime.now()
+    print(seed)
+    generate_kwargs = dict(
+        temperature=0.9,
+        max_new_tokens=max_tokens,
+        top_p=0.95,
+        repetition_penalty=1.0,
+        do_sample=True,
+        seed=seed,
+    )
+    print(f'prefix_tog:: {prefix_tog}')
+    if prefix_tog == "normal":
+        content = PREFIX.format(
+            timestamp=timestamp,
+            purpose=purpose,
+        ) + prompt_template.format(**prompt_kwargs)
+    if prefix_tog == "alternate":
+        content = PREFIX_ALT + prompt_template.format(**prompt_kwargs)
+
+    #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    #formatted_prompt = format_prompt(f'{content}', **prompt_kwargs['history'])
+
+    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    resp = ""
+    for response in stream:
+        resp += response.token.text
+        #yield resp
+    return resp
+
+
+def compress_data(c,purpose, task, history):
+    seed=random.randint(1,1000000000)
+    print(c)
+    divr=int(c)/MAX_DATA
+    divi=int(divr)+1 if divr != int(divr) else int(divr)
+    chunk = int(int(c)/divr)
+    print(f'chunk:: {chunk}')
+    print(f'divr:: {divr}')
+    print(f'divi:: {divi}')
+    out = []
+    #out=""
+    s=0
+    e=chunk
+    print(f'e:: {e}')
+    new_history=""
+    task = f'Compile this data to fulfill the task: {task}, and complete the purpose: {purpose}\n'
+    for z in range(divi):
+        print(f's:e :: {s}:{e}')
+        hist = history[s:e]
+        print(f'hist::\n{hist}')
+        resp = run_gpt(
+            COMPRESS_DATA_PROMPT,
+            stop_tokens=["observation:", "task:", "action:", "thought:"],
+            max_tokens=2048,
+            seed=seed,
+            purpose=purpose,
+            prefix_tog="normal",
+            task=task,
+            knowledge=new_history,
+            history=hist,
+        ).strip("\n")
+        new_history = resp
+        print(resp)
+        out+=resp
+        e=e+chunk
+        s=s+chunk
+    """
+    resp = run_gpt(
+        COMPRESS_DATA_PROMPT,
+        stop_tokens=["observation:", "task:", "action:", "thought:"],
+        max_tokens=2048,
+        seed=seed,
+        purpose=purpose,
+        task=task,
+        knowledge=new_history,
+        history=result,
+    )
+    """
+    print("final" + resp)
+    history = "result: {}\n".format(resp)
+    return history
+
+
+
 def get_records(inp,data):
     key_box=[]
     seed=random.randint(1,1000000000)
 
@@ -426,7 +946,7 @@ def find_rss():
         error_box.append({"Name":rss_url,"Error":e,"Error Code":6})
         print(f'Exception::{e}')
         pass
-    '''
+    """
     json_object_valid = json.dumps(valid_box, indent=4)
     with open("tmp3.json", "w") as outfile3:
         outfile3.write(json_object_valid)
 
@@ -439,7 +959,7 @@ def find_rss():
         repo_type="dataset",
     )
     yield out_box,[(None,'')],error_box
-    '''
+    """
     print("DONE")
     json_object = json.dumps(out_box, indent=4)
     #json_object = json.dumps(out_box,indent=4)
 
@@ -541,3 +1061,4 @@ with gr.Blocks() as app:
     u_btn.click(find_rss,None,[out_json,cb,error_box])
     sub_btn.click(summarize,[inst,cb,out_json],[inst,cb,error_box]).then(load_html,error_box,html_out)
 app.queue(default_concurrency_limit=20).launch()
+'''