LeetTools committed · verified
Commit 7f2676a · 1 Parent(s): 1af3a2c

Upload ask.py

Files changed (1): ask.py (+252 -42)
ask.py CHANGED
@@ -1,3 +1,5 @@
+import csv
+import io
 import json
 import logging
 import os
@@ -5,9 +7,10 @@ import queue
 import urllib.parse
 from concurrent.futures import ThreadPoolExecutor
 from datetime import datetime
+from enum import Enum
 from functools import partial
 from queue import Queue
-from typing import Any, Dict, Generator, List, Optional, Tuple
+from typing import Any, Dict, Generator, List, Optional, Tuple, TypeVar

 import click
 import duckdb
@@ -17,12 +20,20 @@ from bs4 import BeautifulSoup
 from dotenv import load_dotenv
 from jinja2 import BaseLoader, Environment
 from openai import OpenAI
-from pydantic import BaseModel
+from pydantic import BaseModel, create_model
+
+TypeVar_BaseModel = TypeVar("TypeVar_BaseModel", bound=BaseModel)
+

 script_dir = os.path.dirname(os.path.abspath(__file__))
 default_env_file = os.path.abspath(os.path.join(script_dir, ".env"))


+class OutputMode(str, Enum):
+    answer = "answer"
+    extract = "extract"
+
+
 class AskSettings(BaseModel):
     date_restrict: int
     target_site: str
@@ -31,6 +42,8 @@ class AskSettings(BaseModel):
     url_list: List[str]
     inference_model_name: str
     hybrid_search: bool
+    output_mode: OutputMode
+    extract_schema_str: str


 def _get_logger(log_level: str) -> logging.Logger:
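
Since OutputMode subclasses str, mode values can be constructed directly from the raw CLI/UI strings and compared against plain strings. A quick illustration (not part of the commit):

    OutputMode("extract") is OutputMode.extract  # lookup by value returns the member
    OutputMode.answer == "answer"                # a str subclass compares equal to plain strings
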
@@ -60,6 +73,36 @@ def _read_url_list(url_list_file: str) -> List[str]:
     return url_list


+def _read_extract_schema_str(extract_schema_file: str) -> str:
+    if not extract_schema_file:
+        return ""
+
+    with open(extract_schema_file, "r") as f:
+        schema_str = f.read()
+        return schema_str
+
+
+def _output_csv(result_dict: Dict[str, List[BaseModel]], key_name: str) -> str:
+    # generate the CSV content from a Dict of URL and list of extracted items
+    output = io.StringIO()
+    csv_writer = None
+    for src_url, items in result_dict.items():
+        for item in items:
+            value_dict = item.model_dump()
+            item_with_url = {**value_dict, key_name: src_url}
+
+            if csv_writer is None:
+                headers = list(value_dict.keys()) + [key_name]
+                csv_writer = csv.DictWriter(output, fieldnames=headers)
+                csv_writer.writeheader()
+
+            csv_writer.writerow(item_with_url)
+
+    csv_content = output.getvalue()
+    output.close()
+    return csv_content
+
+
 class Ask:

     def __init__(self, logger: Optional[logging.Logger] = None):
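
For reference, the schema file consumed by _read_extract_schema_str is plain Python source defining one Pydantic model, and _output_csv flattens a {url: [items]} dict into CSV with the source URL appended as the last column. A minimal sketch (the Company model, field names, and URL here are hypothetical, not part of this commit):

    # schema.py -- passed via --extract-schema-file
    class Company(BaseModel):
        name: str
        revenue: float

    # _output_csv({"https://example.com/list": [Company(name="Acme", revenue=1.5)]}, "SourceURL")
    # would produce:
    #   name,revenue,SourceURL
    #   Acme,1.5,https://example.com/list
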
@@ -411,6 +454,19 @@ CREATE TABLE {table_name} (
         template = env.from_string(template_str)
         return template.render(variables)

+    def _get_target_class(self, extract_schema_str: str) -> TypeVar_BaseModel:
+        local_namespace = {"BaseModel": BaseModel}
+        exec(extract_schema_str, local_namespace, local_namespace)
+        for key, value in local_namespace.items():
+            if key == "__builtins__":
+                continue
+            if key == "BaseModel":
+                continue
+            if isinstance(value, type):
+                if issubclass(value, BaseModel):
+                    return value
+        raise Exception("No Pydantic schema found in the extract schema str.")
+
     def run_inference(
         self,
         query: str,
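
_get_target_class materializes the user-supplied schema with exec: it runs the schema source in a namespace seeded with BaseModel and returns the first BaseModel subclass it finds, so the schema file must be trusted input. A standalone sketch of the same idea (the Person model is hypothetical):

    schema_str = "class Person(BaseModel):\n    name: str\n    age: int"
    namespace = {"BaseModel": BaseModel}
    exec(schema_str, namespace, namespace)
    target = next(
        v
        for v in namespace.values()
        if isinstance(v, type) and issubclass(v, BaseModel) and v is not BaseModel
    )
    # target is now the Person class and can be used like any Pydantic model
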
@@ -484,6 +540,74 @@ Here is the context:
         response_str = completion.choices[0].message.content
         return response_str

+    def run_extract(
+        self,
+        query: str,
+        extract_schema_str: str,
+        target_content: str,
+        settings: AskSettings,
+    ) -> List[TypeVar_BaseModel]:
+        target_class = self._get_target_class(extract_schema_str)
+        system_prompt = (
+            "You are an expert at extracting structured information from documents."
+        )
+        user_prompt_template = """
+Given the provided content, if it contains information about {{ query }}, please extract the
+list of structured data items as defined in the following Pydantic schema:
+
+{{ extract_schema_str }}
+
+Below is the provided content:
+{{ content }}
+"""
+        user_prompt = self._render_template(
+            user_prompt_template,
+            {
+                "query": query,
+                "content": target_content,
+                "extract_schema_str": extract_schema_str,
+            },
+        )
+
+        self.logger.debug(
+            f"Running extraction with model: {settings.inference_model_name}"
+        )
+        self.logger.debug(f"Final user prompt: {user_prompt}")
+
+        class_name = target_class.__name__
+        list_class_name = f"{class_name}_list"
+        response_pydantic_model = create_model(
+            list_class_name,
+            items=(List[target_class], ...),
+        )
+
+        api_client = self._get_api_client()
+        completion = api_client.beta.chat.completions.parse(
+            model=settings.inference_model_name,
+            messages=[
+                {
+                    "role": "system",
+                    "content": system_prompt,
+                },
+                {
+                    "role": "user",
+                    "content": user_prompt,
+                },
+            ],
+            response_format=response_pydantic_model,
+        )
+        if completion is None:
+            raise Exception("No completion from the API")
+
+        message = completion.choices[0].message
+        if message.refusal:
+            raise Exception(
+                f"Refused to extract information from the document: {message.refusal}."
+            )
+
+        extract_result = message.parsed
+        return extract_result.items
+
     def run_query_gradio(
         self,
         query: str,
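
The create_model call is the load-bearing piece here: structured-output parsing wants a single top-level model, so run_extract wraps the target class in a dynamically built container with one items: List[target_class] field and returns the parsed list. Condensed to its essentials (the Person class and model name are hypothetical; client is an OpenAI instance):

    PersonList = create_model("Person_list", items=(List[Person], ...))
    completion = client.beta.chat.completions.parse(
        model="gpt-4o-mini",  # assumption: any model that supports structured outputs
        messages=[{"role": "user", "content": "Extract the people mentioned: ..."}],
        response_format=PersonList,
    )
    people = completion.choices[0].message.parsed.items  # -> List[Person]
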
@@ -494,6 +618,8 @@ Here is the context:
         url_list_str: str,
         inference_model_name: str,
         hybrid_search: bool,
+        output_mode_str: str,
+        extract_schema_str: str,
     ) -> Generator[Tuple[str, str], None, Tuple[str, str]]:
         logger = self.logger
         log_queue = Queue()
@@ -511,6 +637,8 @@ Here is the context:
             url_list=url_list,
             inference_model_name=inference_model_name,
             hybrid_search=hybrid_search,
+            output_mode=OutputMode(output_mode_str),
+            extract_schema_str=extract_schema_str,
         )

         queue_handler = logging.Handler()
@@ -547,47 +675,78 @@ Here is the context:
         logger.info(f"✅ Scraped {len(scrape_results)} URLs.")
         yield "", update_logs()

-        logger.info("Chunking the text ...")
-        yield "", update_logs()
-        chunking_results = self.chunk_results(scrape_results, 1000, 100)
-        total_chunks = 0
-        for url, chunks in chunking_results.items():
-            logger.debug(f"URL: {url}")
-            total_chunks += len(chunks)
-            for i, chunk in enumerate(chunks):
-                logger.debug(f"Chunk {i+1}: {chunk}")
-        logger.info(f"✅ Generated {total_chunks} chunks ...")
-        yield "", update_logs()
+        if settings.output_mode == OutputMode.answer:
+            logger.info("Chunking the text ...")
+            yield "", update_logs()
+            chunking_results = self.chunk_results(scrape_results, 1000, 100)
+            total_chunks = 0
+            for url, chunks in chunking_results.items():
+                logger.debug(f"URL: {url}")
+                total_chunks += len(chunks)
+                for i, chunk in enumerate(chunks):
+                    logger.debug(f"Chunk {i+1}: {chunk}")
+            logger.info(f"✅ Generated {total_chunks} chunks ...")
+            yield "", update_logs()

-        logger.info(f"Saving {total_chunks} chunks to DB ...")
-        yield "", update_logs()
-        table_name = self.save_chunks_to_db(chunking_results)
-        logger.info(f"✅ Successfully embedded and saved chunks to DB.")
-        yield "", update_logs()
+            logger.info(f"Saving {total_chunks} chunks to DB ...")
+            yield "", update_logs()
+            table_name = self.save_chunks_to_db(chunking_results)
+            logger.info(f"✅ Successfully embedded and saved chunks to DB.")
+            yield "", update_logs()

-        logger.info("Querying the vector DB to get context ...")
-        matched_chunks = self.vector_search(table_name, query, settings)
-        for i, result in enumerate(matched_chunks):
-            logger.debug(f"{i+1}. {result}")
-        logger.info(f"✅ Got {len(matched_chunks)} matched chunks.")
-        yield "", update_logs()
+            logger.info("Querying the vector DB to get context ...")
+            matched_chunks = self.vector_search(table_name, query, settings)
+            for i, result in enumerate(matched_chunks):
+                logger.debug(f"{i+1}. {result}")
+            logger.info(f"✅ Got {len(matched_chunks)} matched chunks.")
+            yield "", update_logs()

-        logger.info("Running inference with context ...")
-        yield "", update_logs()
-        answer = self.run_inference(
-            query=query,
-            matched_chunks=matched_chunks,
-            settings=settings,
-        )
-        logger.info("✅ Finished inference API call.")
-        logger.info("Generating output ...")
-        yield "", update_logs()
+            logger.info("Running inference with context ...")
+            yield "", update_logs()
+            answer = self.run_inference(
+                query=query,
+                matched_chunks=matched_chunks,
+                settings=settings,
+            )
+            logger.info("✅ Finished inference API call.")
+            logger.info("Generating output ...")
+            yield "", update_logs()

-        answer = f"# Answer\n\n{answer}\n"
-        references = "\n".join(
-            [f"[{i+1}] {result['url']}" for i, result in enumerate(matched_chunks)]
-        )
-        yield f"{answer}\n\n# References\n\n{references}", update_logs()
+            answer = f"# Answer\n\n{answer}\n"
+            references = "\n".join(
+                [
+                    f"[{i+1}] {result['url']}"
+                    for i, result in enumerate(matched_chunks)
+                ]
+            )
+            yield f"{answer}\n\n# References\n\n{references}", update_logs()
+        elif settings.output_mode == OutputMode.extract:
+            logger.info("Extracting structured data ...")
+            yield "", update_logs()
+
+            aggregated_output = {}
+            for url, text in scrape_results.items():
+                items = self.run_extract(
+                    query=query,
+                    extract_schema_str=extract_schema_str,
+                    target_content=text,
+                    settings=settings,
+                )
+                self.logger.info(
+                    f"✅ Finished inference API call. Extracted {len(items)} items from {url}."
+                )
+                yield "", update_logs()
+
+                self.logger.debug(items)
+                aggregated_output[url] = items
+
+            logger.info("✅ Finished extraction from all URLs.")
+            logger.info("Generating output ...")
+            yield "", update_logs()
+            answer = _output_csv(aggregated_output, "SourceURL")
+            yield f"{answer}", update_logs()
+        else:
+            raise Exception(f"Invalid output mode: {settings.output_mode}")

         logs = ""
         final_result = ""
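
Both branches keep the same streaming contract: every intermediate yield pushes ("", logs) to refresh the log pane while the answer pane stays empty, and only the final yield carries the payload (the markdown answer with references, or the CSV text from _output_csv). A stripped-down model of that contract (illustrative only):

    def fake_pipeline():
        logs = []
        for step in ["scrape", "extract", "aggregate"]:
            logs.append(f"{step} done")
            yield "", "\n".join(logs)  # progress update: logs only
        yield "final answer", "\n".join(logs)  # last yield carries the result

    # a Gradio generator endpoint re-renders its outputs on every yield
    for answer, logs in fake_pipeline():
        print(repr(answer), "|", logs.replace("\n", "; "))
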
@@ -618,6 +777,8 @@ Here is the context:
             url_list_str=url_list_str,
             inference_model_name=settings.inference_model_name,
             hybrid_search=settings.hybrid_search,
+            output_mode_str=settings.output_mode,
+            extract_schema_str=settings.extract_schema_str,
         ):
             final_result = result
         return final_result
@@ -630,6 +791,13 @@ def launch_gradio(
     logger: logging.Logger,
 ) -> None:
     ask = Ask(logger=logger)
+
+    def toggle_schema_textbox(option):
+        if option == "extract":
+            return gr.update(visible=True)
+        else:
+            return gr.update(visible=False)
+
     with gr.Blocks() as demo:
         gr.Markdown("# Ask.py - Web Search-Extract-Summarize")
         gr.Markdown(
@@ -640,9 +808,22 @@ def launch_gradio(
         with gr.Column():

             query_input = gr.Textbox(label="Query", value=query)
-            hybrid_search_input = gr.Checkbox(
-                label="Hybrid Search [Use both vector search and full-text search.]",
-                value=init_settings.hybrid_search,
+            output_mode_input = gr.Radio(
+                label="Output Mode [answer: simple answer, extract: get structured data]",
+                choices=["answer", "extract"],
+                value=init_settings.output_mode,
+            )
+            extract_schema_input = gr.Textbox(
+                label="Extract Pydantic Schema",
+                visible=(init_settings.output_mode == "extract"),
+                value=init_settings.extract_schema_str,
+                lines=5,
+                max_lines=20,
+            )
+            output_mode_input.change(
+                fn=toggle_schema_textbox,
+                inputs=output_mode_input,
+                outputs=extract_schema_input,
             )
             date_restrict_input = gr.Number(
                 label="Date Restrict (Optional) [0 or empty means no date limit.]",
@@ -668,6 +849,10 @@
             )

             with gr.Accordion("More Options", open=False):
+                hybrid_search_input = gr.Checkbox(
+                    label="Hybrid Search [Use both vector search and full-text search.]",
+                    value=init_settings.hybrid_search,
+                )
                 inference_model_name_input = gr.Textbox(
                     label="Inference Model Name",
                     value=init_settings.inference_model_name,
@@ -690,6 +875,8 @@
                 url_list_input,
                 inference_model_name_input,
                 hybrid_search_input,
+                output_mode_input,
+                extract_schema_input,
             ],
             outputs=[answer_output, logs_output],
         )
@@ -699,6 +886,14 @@

 @click.command(help="Search web for the query and summarize the results.")
 @click.option("--query", "-q", required=False, help="Query to search")
+@click.option(
+    "--output-mode",
+    "-o",
+    type=click.Choice(["answer", "extract"], case_sensitive=False),
+    default="answer",
+    required=False,
+    help="Output mode for the answer, default is a simple answer",
+)
 @click.option(
     "--date-restrict",
     "-d",
@@ -735,6 +930,14 @@
     show_default=True,
     help="Instead of doing web search, scrape the target URL list and answer the query based on the content",
 )
+@click.option(
+    "--extract-schema-file",
+    type=str,
+    required=False,
+    default="",
+    show_default=True,
+    help="Path to the Pydantic schema file for the extract mode",
+)
 @click.option(
     "--inference-model-name",
     "-m",
@@ -763,11 +966,13 @@
 )
 def search_extract_summarize(
     query: str,
+    output_mode: str,
     date_restrict: int,
     target_site: str,
     output_language: str,
     output_length: int,
     url_list_file: str,
+    extract_schema_file: str,
     inference_model_name: str,
     hybrid_search: bool,
     web_ui: bool,
@@ -776,6 +981,9 @@ def search_extract_summarize(
     load_dotenv(dotenv_path=default_env_file, override=False)
     logger = _get_logger(log_level)

+    if output_mode == "extract" and not extract_schema_file:
+        raise Exception("Extract mode requires the --extract-schema-file argument.")
+
     settings = AskSettings(
         date_restrict=date_restrict,
         target_site=target_site,
@@ -784,6 +992,8 @@ def search_extract_summarize(
         url_list=_read_url_list(url_list_file),
         inference_model_name=inference_model_name,
         hybrid_search=hybrid_search,
+        output_mode=OutputMode(output_mode),
+        extract_schema_str=_read_extract_schema_str(extract_schema_file),
     )

     if web_ui or os.environ.get("RUN_GRADIO_UI", "false").lower() != "false":