peterpeter8585 committed • Commit aa933fe • 1 Parent: a20cdd1
Update app.py
app.py CHANGED
@@ -1,8 +1,92 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
+import requests
+from bs4 import BeautifulSoup
+import urllib.parse
+import random
 
-
+# List of user agents to choose from for requests
+_useragent_list = [
+    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',
+    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
+    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62',
+    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0'
+]
+
+def get_useragent():
+    """Returns a random user agent from the list."""
+    return random.choice(_useragent_list)
+
+def extract_text_from_webpage(html_content):
+    """Extracts visible text from HTML content using BeautifulSoup."""
+    soup = BeautifulSoup(html_content, "html.parser")
+    # Remove unwanted tags
+    for tag in soup(["script", "style", "header", "footer", "nav"]):
+        tag.extract()
+    # Get the remaining visible text
+    visible_text = soup.get_text(strip=True)
+    return visible_text
+
+def search(term, num_results=1, lang="en", advanced=True, sleep_interval=0, timeout=5, safe="active", ssl_verify=None):
+    """Performs a Google search and returns the results."""
+    escaped_term = urllib.parse.quote_plus(term)  # NOTE: currently unused; requests encodes params itself
+    start = 0
+    all_results = []
+
+    # Fetch results in batches
+    while start < num_results:
+        resp = requests.get(
+            url="https://www.google.com/search",
+            headers={"User-Agent": get_useragent()},  # Set a random user agent
+            params={
+                "q": term,
+                "num": num_results - start,  # Number of results to fetch in this batch
+                "hl": lang,
+                "start": start,
+                "safe": safe,
+            },
+            timeout=timeout,
+            verify=ssl_verify,
+        )
+        resp.raise_for_status()  # Raise an exception if the request fails
+
+        soup = BeautifulSoup(resp.text, "html.parser")
+        result_block = soup.find_all("div", attrs={"class": "g"})
+
+        # If no results, continue to the next batch
+        if not result_block:
+            start += 1
+            continue
+
+        # Extract link and text from each result
+        for result in result_block:
+            link = result.find("a", href=True)
+            if link:
+                link = link["href"]
+                try:
+                    # Fetch webpage content
+                    webpage = requests.get(link, headers={"User-Agent": get_useragent()})
+                    webpage.raise_for_status()
+                    # Extract visible text from the webpage
+                    visible_text = extract_text_from_webpage(webpage.text)
+                    all_results.append({"link": link, "text": visible_text})
+                except requests.exceptions.RequestException as e:
+                    # Handle errors fetching or processing the webpage
+                    print(f"Error fetching or processing {link}: {e}")
+                    all_results.append({"link": link, "text": None})
+            else:
+                all_results.append({"link": None, "text": None})
+
+        start += len(result_block)  # Update starting index for the next batch
+
+    return all_results
+
+
+client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
 
 def respond(
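
A note on `extract_text_from_webpage`: `get_text(strip=True)` joins text nodes with no separator, so words from adjacent tags can run together ("Home" and "About" become "HomeAbout"). A minimal variant that keeps word boundaries, illustrative and not part of this commit:

```python
from bs4 import BeautifulSoup

def extract_text_with_spaces(html_content):
    """Hypothetical variant of extract_text_from_webpage that preserves word boundaries."""
    soup = BeautifulSoup(html_content, "html.parser")
    for tag in soup(["script", "style", "header", "footer", "nav"]):
        tag.extract()
    # A separator argument turns "<p>a</p><p>b</p>" into "a b" instead of "ab"
    return soup.get_text(" ", strip=True)
```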
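
A quick local smoke test of the new search pipeline might look like the sketch below; the query string and the 300-character preview are illustrative, and since Google's result markup changes often, the `div.g` selector may return nothing for some result pages:

```python
if __name__ == "__main__":
    # Fetch a few results and preview the scraped text of each page
    results = search("gradio chatbot streaming", num_results=3)
    for item in results:
        preview = (item["text"] or "")[:300]
        print(item["link"], "->", preview)
```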
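
The hunk truncates at `def respond(`, so the handler body is not shown here. As a hedged sketch of how the scraped text could reach the Mixtral endpoint, one option is `InferenceClient.text_generation`; the helper name, prompt format, and truncation limits below are assumptions, not code from this commit:

```python
def answer_with_web_context(question):
    # Hypothetical helper: ground the model's answer in scraped search results
    snippets = search(question, num_results=2)
    context = "\n".join((s["text"] or "")[:1000] for s in snippets)
    # Mixtral-Instruct expects the [INST] ... [/INST] chat format
    prompt = f"[INST] Use this context to answer:\n{context}\n\nQuestion: {question} [/INST]"
    return client.text_generation(prompt, max_new_tokens=256)
```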