Prathmesh48 committed
Commit
a4e40bd
1 Parent(s): 683da78

Create app.py

Files changed (1)
  1. app.py +172 -0
app.py ADDED
@@ -0,0 +1,172 @@
+ # file: app.py
+
+ import gradio as gr
+ import requests
+ import concurrent.futures
+ from concurrent.futures import ThreadPoolExecutor
+ from langdetect import detect_langs
+ from PyPDF2 import PdfReader
+ from io import BytesIO
+ import logging
+ from dotenv import load_dotenv
+ import os
+
+ load_dotenv()
+
+ # Links already processed, shared across requests for best-effort dedup.
+ seen = set()
+
+ main_url = "https://similar-products-api.vercel.app/search/all"
+ # Reference product that relevant() compares candidate content against.
+ main_product = "Samsung Galaxy"
+
+ API_URL = "https://api-inference.huggingface.co/models/google/flan-t5-xxl"
+ headers = {"Authorization": f"Bearer {os.getenv('HUGGINGFACE_API_TOKEN')}"}
+
+ logging.basicConfig(level=logging.INFO)
+
+ def get_links(product):
+     """Query the similar-products API for links related to `product`."""
+     params = {
+         "API_KEY": "12345",
+         "product": product,
+     }
+     response = requests.get(main_url, params=params)
+     if response.status_code == 200:
+         return response.json()
+     return {}
+
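+ # Assumed response shape (inferred from preprocess_filter below, not from API
+ # docs): a dict mapping each similar-product name either to a flat list of
+ # URLs or to a list of single-key dicts such as {"duckduckgo": [urls, ...]}.
+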
+ def language_preprocess(text):
+     """Return True when `text` is detected as English."""
+     try:
+         return detect_langs(text)[0].lang == 'en'
+     except Exception as e:
+         logging.error(f"Language detection error: {e}")
+         return False
+
+ def relevant(product, similar_product, content):
+     """Ask the hosted model whether `content` relates to both products."""
+     try:
+         payload = {"inputs": f'''Do you think that the given content is similar to {similar_product} and {product}, just Respond True or False \nContent for similar product: {content}'''}
+         response = requests.post(API_URL, headers=headers, json=payload)
+         output = response.json()
+         # bool() on any non-empty string is True (including "False"), so
+         # compare the model's text answer against the literal "true" instead.
+         return output[0]['generated_text'].strip().lower() == 'true'
+     except Exception as e:
+         logging.error(f"Relevance checking error: {e}")
+         return False
+
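+ # For text2text models the Inference API returns a list like
+ # [{"generated_text": "..."}]; error payloads are JSON objects, so the
+ # output[0] lookup raises and the except branch falls back to False.
+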
+ def download_pdf(url, timeout=10):
+     try:
+         response = requests.get(url, timeout=timeout)
+         response.raise_for_status()
+         return BytesIO(response.content)
+     except requests.RequestException as e:
+         logging.error(f"PDF download error: {e}")
+         return None
+
+ def extract_text_from_pages(pdf_file, pages):
+     reader = PdfReader(pdf_file)
+     extracted_text = ""
+     try:
+         for page_num in pages:
+             if page_num < len(reader.pages):
+                 page = reader.pages[page_num]
+                 # extract_text() can return None for image-only pages.
+                 extracted_text += (page.extract_text() or "") + "\n"
+             else:
+                 logging.warning(f"Page {page_num} does not exist in the document.")
+         return extracted_text
+     except Exception as e:
+         logging.error(f"PDF text extraction error: {e}")
+         # Return empty text on failure so the downstream checks fail cleanly.
+         return ""
+
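+ # Example: extract_text_from_pages(pdf_file, [0, 2, 4]) concatenates the text
+ # of the 1st, 3rd and 5th pages (0-indexed), skipping pages that don't exist.
+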
+ def process_link(link, similar_product):
+     if link in seen:
+         return None
+     seen.add(link)
+     try:
+         pdf_file = download_pdf(link)
+         if pdf_file:
+             text = extract_text_from_pages(pdf_file, [0, 2, 4])
+             if language_preprocess(text):
+                 if relevant(main_product, similar_product, text):
+                     return link
+     except Exception as e:
+         logging.error(f"Error processing link: {e}")
+     return None
+
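+ # Note: `seen` is shared by all requests and worker threads without a lock,
+ # so under heavy concurrency a URL can occasionally be processed twice.
+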
+ def filtering(urls, similar_product):
+     res = []
+     with ThreadPoolExecutor() as executor:
+         futures = {executor.submit(process_link, link, similar_product): link for link in urls}
+         for future in concurrent.futures.as_completed(futures):
+             result = future.result()
+             if result is not None:
+                 res.append(result)
+     return res
+
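+ # Usage sketch (URLs and product name illustrative):
+ #     filtering(["https://example.com/a.pdf"], "Samsung Galaxy S24")
+ # returns only the links whose sampled pages are English and judged relevant
+ # by the model.
+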
+ def wikipedia_url(product):
+     api_url = "https://en.wikipedia.org/w/api.php"
+     params = {
+         "action": "opensearch",
+         "search": product,
+         "limit": 5,
+         "namespace": 0,
+         "format": "json"
+     }
+     try:
+         response = requests.get(api_url, params=params)
+         response.raise_for_status()
+         data = response.json()
+         if data and len(data) > 3 and len(data[3]) > 0:
+             return data[3]
+         return []
+     except requests.RequestException as e:
+         logging.error(f"Error fetching Wikipedia URLs: {e}")
+         return []
+
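+ # MediaWiki opensearch responses are 4-element arrays:
+ # [query, titles, descriptions, urls], so data[3] holds the page URLs.
+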
+ def preprocess_initial(product):
+     return get_links(product)
+
+ def preprocess_filter(product, data):
+     for similar_product in data:
+         if similar_product != product:
+             if next(iter(data[similar_product][0])) == 'duckduckgo':
+                 # Entries grouped by source: PDF search engines get filtered,
+                 # everything else is passed through unchanged.
+                 s = {'duckduckgo', 'google', 'archive'}
+                 temp = []
+
+                 for idx, item in enumerate(data[similar_product]):
+                     source = next(iter(item))
+                     if source in s:
+                         urls = data[similar_product][idx][source]
+                         temp += filtering(urls, similar_product)
+                     else:
+                         temp += data[similar_product][idx][source]
+
+                 data[similar_product] = temp
+                 data[similar_product] += wikipedia_url(similar_product)
+             else:
+                 # Flat list of URLs: filter them all.
+                 urls = data[similar_product]
+                 data[similar_product] = filtering(urls, similar_product)
+                 data[similar_product] += wikipedia_url(similar_product)
+     logging.info('Filtering completed')
+     return data
+
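+ # The two input shapes handled above (keys and URLs illustrative):
+ #     {"Pixel 8": [{"duckduckgo": [...]}, {"whitepaper": [...]}]}  # grouped
+ #     {"Pixel 8": ["https://example.com/spec.pdf", ...]}           # flat
+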
+ def main(product_name):
+     return preprocess_initial(product_name)
+
+ def filter_links(product_name, initial_data):
+     return preprocess_filter(product_name, initial_data)
+
+ with gr.Blocks() as demo:
+     product_name = gr.Textbox(label="Product Name")
+     get_links_btn = gr.Button("Get Links")
+     initial_links_output = gr.JSON()
+     filter_btn = gr.Button("Filter Links")
+     filtered_links_output = gr.JSON()
+
+     get_links_btn.click(fn=main, inputs=product_name, outputs=initial_links_output)
+     filter_btn.click(fn=filter_links, inputs=[product_name, initial_links_output], outputs=filtered_links_output)
+
+ if __name__ == "__main__":
+     demo.launch()
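
A minimal headless sketch of the same pipeline the UI drives, assuming a .env file that sets HUGGINGFACE_API_TOKEN and that both external APIs are reachable (the product name is illustrative):

    from app import get_links, preprocess_filter
    links = get_links("Samsung Galaxy")
    print(preprocess_filter("Samsung Galaxy", links))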