Spaces:
Sleeping
Sleeping
File size: 4,365 Bytes
138c09e 4d3e890 138c09e 4d3e890 138c09e 4d3e890 138c09e 4d3e890 138c09e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 |
import argparse
import os
import sys
import uvicorn
from fastapi import FastAPI, Depends
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from pydantic import BaseModel, Field
from typing import Union
from sse_starlette.sse import EventSourceResponse, ServerSentEvent
from utils.logger import logger
from networks.google_searcher import GoogleSearcher
from networks.html_fetcher import HTMLFetcher
from documents.query_results_extractor import QueryResultsExtractor
from documents.webpage_content_extractor import WebpageContentExtractor
from utils.logger import logger
class SearchAPIApp:
    """FastAPI application exposing a web-search endpoint.

    POST /queries_to_search_results runs each query through Google search,
    extracts structured results from the result page, and optionally fetches
    each result URL to attach its main text content.
    """

    def __init__(self):
        self.app = FastAPI(
            docs_url="/",
            title="Web Search API",
            swagger_ui_parameters={"defaultModelsExpandDepth": -1},
            version="1.0",
        )
        self.setup_routes()

    class QueriesToSearchResultsPostItem(BaseModel):
        # Request body schema for POST /queries_to_search_results.
        queries: list = Field(
            default=[""],
            description="(list[str]) Queries to search",
        )
        result_num: int = Field(
            default=10,
            description="(int) Number of search results",
        )
        safe: bool = Field(
            default=False,
            description="(bool) Enable SafeSearch",
        )
        # NOTE(review): `types` is declared but not consumed by the handler
        # below — presumably reserved for future filtering; confirm.
        types: list = Field(
            default=["web"],
            description="(list[str]) Types of search results: `web`, `image`, `videos`, `news`",
        )
        extract_content: bool = Field(
            default=False,
            description="(bool) Enable extracting main text contents from webpage, will add `text` field in each `query_result` dict",
        )

    def queries_to_search_results(self, item: QueriesToSearchResultsPostItem):
        """Search each query and return a list of per-query result dicts.

        Blank or whitespace-only queries are skipped. When
        `item.extract_content` is true, every result page is fetched and its
        extracted main text is stored under the `text` key of that result.
        """
        google_searcher = GoogleSearcher()
        query_results_extractor = QueryResultsExtractor()
        queries_search_results = []
        for query in item.queries:
            if not query.strip():
                continue
            query_html_path = google_searcher.search(
                query=query, result_num=item.result_num, safe=item.safe
            )
            query_search_results = query_results_extractor.extract(query_html_path)
            queries_search_results.append(query_search_results)
        # Fix: log once after the loop — the original logged the (growing)
        # accumulated list on every iteration, re-emitting earlier results.
        logger.note(queries_search_results)
        if item.extract_content:
            html_fetcher = HTMLFetcher()
            webpage_content_extractor = WebpageContentExtractor()
            for query_idx, query_search_result in enumerate(queries_search_results):
                for query_result_idx, query_result in enumerate(
                    query_search_result["query_results"]
                ):
                    html_path = html_fetcher.fetch(query_result["url"])
                    extracted_content = webpage_content_extractor.extract(html_path)
                    queries_search_results[query_idx]["query_results"][
                        query_result_idx
                    ]["text"] = extracted_content
        return queries_search_results

    def setup_routes(self):
        # Register the POST endpoint; request/response schema comes from the
        # handler's annotations.
        self.app.post(
            "/queries_to_search_results",
            summary="Search queries, and extract contents from results",
        )(self.queries_to_search_results)
class ArgParser(argparse.ArgumentParser):
    """Command-line argument parser for the Web Search API server.

    Registers the server options and immediately parses ``sys.argv``,
    storing the parsed namespace on ``self.args``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.add_argument(
            "-s", "--server",
            type=str, default="0.0.0.0",
            help="Server IP for Web Search API",
        )
        self.add_argument(
            "-p", "--port",
            type=int, default=21111,
            help="Server Port for Web Search API",
        )
        self.add_argument(
            "-d", "--dev",
            action="store_true", default=False,
            help="Run in dev mode",
        )
        # Parse eagerly so callers can read `.args` right after construction.
        self.args = self.parse_args(sys.argv[1:])
# Module-level ASGI app, importable by uvicorn as "__main__:app".
app = SearchAPIApp().app

if __name__ == "__main__":
    args = ArgParser().args
    # The original duplicated the uvicorn.run call in an if/else that only
    # differed in `reload`; collapsed into a single call (dev mode == reload).
    uvicorn.run("__main__:app", host=args.server, port=args.port, reload=args.dev)

# python -m apis.search_api  # [Docker] in product mode
# python -m apis.search_api -d  # [Dev] in develop mode
|