import logging
import os
import random
import re
import string
import subprocess
import time
from typing import Optional, List
from urllib.parse import urlparse

import gradio as gr
import pdfplumber
import requests
from bs4 import BeautifulSoup
from docx import Document
from pdf2image import convert_from_path
from pptx import Presentation
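

# URL text extraction: URLTextExtractor fetches a page with a rotating User-Agent,
# retries with exponential backoff, and returns the page's visible text.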
class URLTextExtractor:
    """
    A comprehensive utility for extracting text content from web pages.

    Features:
    - Rotating User-Agents to mimic different browsers
    - Robust error handling and retry mechanism
    - Section preservation for maintaining document structure
    - Configurable extraction options
    - Logging support

    Attributes:
        USER_AGENTS (list): A list of user agent strings to rotate through.
        logger (logging.Logger): Logger for tracking extraction attempts and errors.

    Example:
        >>> extractor = URLTextExtractor()
        >>> text = extractor.extract_text_from_url('https://example.com')
        >>> print(text)
    """

    USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.1 Safari/605.1.15',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:95.0) Gecko/20100101 Firefox/95.0',
        'Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Mobile/15E148 Safari/604.1',
        'Mozilla/5.0 (Linux; Android 10; SM-G970F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Mobile Safari/537.36',
    ]

    def __init__(self, logger=None):
        """
        Initialize the URLTextExtractor.

        Args:
            logger (logging.Logger, optional): Custom logger.
                If not provided, creates a default logger.
        """
        self.logger = logger or self._create_default_logger()

    def _create_default_logger(self):
        """
        Create a default logger for tracking the extraction process.

        Returns:
            logging.Logger: Configured logger instance
        """
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.INFO)
        # Attach a handler only once so repeated instantiation does not
        # produce duplicate log lines.
        if not logger.handlers:
            handler = logging.StreamHandler()
            formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
            handler.setFormatter(formatter)
            logger.addHandler(handler)
        return logger

    def _process_element_text(self, element):
        """
        Process text within an element, handling anchor tags specially.

        Args:
            element (bs4.element.Tag): BeautifulSoup element to process

        Returns:
            str: Processed text with proper spacing
        """
        # Replace each anchor tag with its text, padded with spaces so link
        # text does not run into the surrounding words.
        for a_tag in element.find_all('a'):
            a_tag.replace_with(f' {a_tag.get_text(strip=True)} ')

        return element.get_text(separator=' ', strip=True)

    def extract_text_from_url(self, url, max_retries=3, preserve_sections=True,
                              min_section_length=30, allowed_tags=None):
        """
        Extract text content from a given URL with advanced configuration.

        Args:
            url (str): The URL of the webpage to extract text from.
            max_retries (int, optional): Maximum number of retry attempts. Defaults to 3.
            preserve_sections (bool, optional): Whether to preserve section separations. Defaults to True.
            min_section_length (int, optional): Minimum length of text sections to include. Defaults to 30.
            allowed_tags (list, optional): Specific HTML tags to extract text from.
                If None, uses a default set of content-rich tags.

        Returns:
            str: Extracted text content from the webpage, or None if the URL is malformed.

        Raises:
            ValueError: If the URL cannot be fetched after the maximum number of retries.
            requests.RequestException: For network-related errors.

        Examples:
            >>> extractor = URLTextExtractor()
            >>> text = extractor.extract_text_from_url('https://example.com')
            >>> text = extractor.extract_text_from_url('https://example.com', preserve_sections=False)
        """
        if allowed_tags is None:
            allowed_tags = ['p', 'div', 'article', 'section', 'main',
                            'h1', 'h2', 'h3', 'h4', 'h5', 'h6']

        # Reject URLs that lack a scheme or host before attempting any request.
        try:
            parsed_url = urlparse(url)
            if not all([parsed_url.scheme, parsed_url.netloc]):
                self.logger.warning(f"Malformed URL, nothing to fetch: {url}")
                return None
        except Exception as e:
            self.logger.error(f"URL parsing error: {e}")
            raise

        for attempt in range(max_retries):
            try:
                # Rotate the User-Agent on every attempt to mimic a normal browser.
                headers = {
                    'User-Agent': random.choice(self.USER_AGENTS),
                    'Accept-Language': 'en-US,en;q=0.9',
                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8'
                }

                response = requests.get(
                    url,
                    headers=headers,
                    timeout=10,
                    allow_redirects=True
                )
                response.raise_for_status()
                self.logger.info(f"Successfully fetched URL: {url}")

                soup = BeautifulSoup(response.text, 'html.parser')

                # Drop non-content elements before extracting text.
                for script in soup(["script", "style", "head", "header", "footer", "nav"]):
                    script.decompose()

                if preserve_sections:
                    # Keep each sufficiently long section on its own line.
                    sections = []
                    for tag in allowed_tags:
                        for element in soup.find_all(tag):
                            section_text = self._process_element_text(element)
                            if len(section_text) >= min_section_length:
                                sections.append(section_text)
                    text = '\n'.join(sections)
                else:
                    # Flatten everything into a single block of text.
                    text = ' '.join(self._process_element_text(element)
                                    for tag in allowed_tags
                                    for element in soup.find_all(tag))

                # Strip empty lines and stray whitespace.
                text = '\n'.join(line.strip() for line in text.split('\n') if line.strip())
                return text

            except (requests.RequestException, ValueError) as e:
                self.logger.warning(f"Attempt {attempt + 1} failed: {e}")
                if attempt == max_retries - 1:
                    self.logger.error(f"Failed to fetch URL after {max_retries} attempts")
                    raise ValueError(f"Error fetching URL after {max_retries} attempts: {e}")

                # Exponential backoff between retries.
                wait_time = 2 ** attempt
                self.logger.info(f"Waiting {wait_time} seconds before retry")
                time.sleep(wait_time)

        return None
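

# Document extraction helpers (PPT/PPTX, PDF, DOC/DOCX). The .ppt and .doc paths
# shell out to the `unoconv` CLI, so unoconv/LibreOffice must be available on the host.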


def extract_text_from_pptx(file_path):
    """Extract text from every shape on every slide of a .pptx file."""
    prs = Presentation(file_path)
    text_content = []

    for slide in prs.slides:
        slide_text = []
        for shape in slide.shapes:
            if hasattr(shape, "text"):
                slide_text.append(shape.text)
        text_content.append("\n".join(slide_text))

    return "\n\n".join(text_content)


def extract_text_from_ppt(file_path):
    """Convert a legacy .ppt file to .pptx with unoconv, then extract its text."""
    try:
        pptx_file_path = os.path.splitext(file_path)[0] + ".pptx"
        subprocess.run(["unoconv", "-f", "pptx", file_path], check=True)

        presentation = Presentation(pptx_file_path)
        text_content = []

        for slide in presentation.slides:
            slide_text = []
            for shape in slide.shapes:
                if hasattr(shape, "text"):
                    slide_text.append(shape.text)
            text_content.append("\n".join(slide_text))

        # Clean up the temporary .pptx produced by unoconv.
        os.remove(pptx_file_path)

        return "\n\n".join(text_content)
    except Exception as e:
        print(f"Error extracting text from PPT file: {e}")
        return "Error extracting text from PPT file"


def extract_text_from_ppt_or_pptx(file_path):
    if file_path.endswith(".pptx"):
        return extract_text_from_pptx(file_path)
    elif file_path.endswith(".ppt"):
        return extract_text_from_ppt(file_path)
    else:
        return "Unsupported file type. Please provide a .ppt or .pptx file."


def convert_pdf_to_image(file):
    # pdf2image requires the poppler utilities to be installed.
    images = convert_from_path(file)
    return images


def extract_text_from_pdf(file):
    text = ""
    with pdfplumber.open(file) as pdf:
        for page in pdf.pages:
            # extract_text() returns None for pages without a text layer.
            text += (page.extract_text() or "") + "\n"
    return text


def extract_text_from_docx(file):
    text = ""
    doc = Document(file.name)
    for paragraph in doc.paragraphs:
        text += paragraph.text + "\n"
    return text


def convert_doc_to_text(doc_path):
    """Convert a legacy .doc file to plain text with unoconv and return the text."""
    try:
        subprocess.run(
            ["unoconv", "--format", "txt", doc_path],
            capture_output=True,
            text=True,
            check=True,
        )
        txt_file_path = os.path.splitext(doc_path)[0] + ".txt"
        with open(txt_file_path, "r") as f:
            text = f.read()
        # Drop a leading BOM if unoconv produced one, then remove the temp file.
        text = text.lstrip("\ufeff")
        os.remove(txt_file_path)
        return text
    except subprocess.CalledProcessError as e:
        print(f"Error converting {doc_path} to text: {e}")
        return ""


def extract_text_from_doc_or_docx(file):
    if file.name.endswith(".docx"):
        return extract_text_from_docx(file)
    elif file.name.endswith(".doc"):
        return convert_doc_to_text(file.name)
    else:
        return "Unsupported file type. Please upload a .doc or .docx file."
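

# Helpers that turn model output into flashcard JSON with front/back HTML and
# basic/cloze term types.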


def generate_random_string(length=23):
    # Random alphanumeric string used as the id suffix for the rich-text divs below.
    characters = string.ascii_letters + string.digits
    random_string = "".join(random.choice(characters) for _ in range(length))
    return random_string


def handle_json_output(json_list: list):
    """Add front/back HTML and a term type ("basic" or "cloze") to each card."""
    n = len(json_list)
    for i in range(n):
        random_string1 = generate_random_string()
        random_string2 = generate_random_string()
        element = json_list[i]
        front = element["frontText"]
        back = element["backText"]
        element["frontHTML"] = (
            f'<div id="element-richtextarea-{random_string1}" style="position:absolute;left:100px;top:50px;width:800px;height:300px;text-align:center;display:flex;align-items:center;font-size:40px;">'
            f"<p>{front}</p></div>"
        )
        element["backHTML"] = (
            f'<div id="element-richtextarea-{random_string2}" style="position:absolute;left:100px;top:50px;width:800px;height:300px;text-align:center;display:flex;align-items:center;font-size:40px;">'
            f"<p>{back}</p></div>"
        )
        element["termType"] = "basic"

        # A run of two or more underscores marks a cloze blank; cards with more
        # than two blanks are left as basic cards.
        cloze_matches = re.findall(r"_{2,}", front)
        if cloze_matches and len(cloze_matches) <= 2:
            element["termType"] = "cloze"

            def replace_cloze(match):
                return f'</p><p><span class="closure">{back}</span></p><p>'

            front_html = re.sub(r"_{2,}", replace_cloze, front)
            element["frontHTML"] = (
                f'<div id="element-richtextarea-{random_string1}" style="position:absolute;left:100px;top:50px;width:800px;height:300px;text-align:center;display:flex;align-items:center;font-size:40px;">'
                f"<p>{front_html}</p></div>"
            )

            def replace_underscores(match):
                return f" {back} "

            # Fill the blank with the answer on the front and clear the back.
            element["frontText"] = re.sub(r"_{2,}", replace_underscores, front)
            element["backText"] = ""

            element["backHTML"] = (
                f'<div id="element-richtextarea-{random_string2}" style="position:absolute;left:100px;top:50px;width:800px;height:300px;text-align:center;display:flex;align-items:center;font-size:40px;">'
                f"<p><br></p></div>"
            )

    return json_list
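

# Illustrative example (hypothetical card): for {"frontText": "Hannibal crossed the ____.",
# "backText": "Alps"}, handle_json_output tags the card as "cloze", rewrites its
# frontText to "Hannibal crossed the  Alps ." with the answer rendered into frontHTML,
# and leaves backText empty.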


def sanitize_list_of_lists(text: str) -> Optional[List[List]]:
    """Parse a string shaped like [["front", "back"], ...] into flashcard dicts."""
    # Keep only the outermost bracketed expression.
    left = text.find("[")
    right = text.rfind("]")
    text = text[left : right + 1]
    try:
        # Note: eval assumes the text comes from a trusted source.
        list_of_lists = eval(text)
        if isinstance(list_of_lists, list):
            out = []
            try:
                for front, back in list_of_lists:
                    out.append({"frontText": front, "backText": back})
                return handle_json_output(out)
            except Exception as e:
                print(e)
                # Return whatever was parsed before the failure, if anything.
                if out != []:
                    return handle_json_output(out)
                else:
                    return None
        else:
            print("The evaluated object is not a list.")
            return None
    except Exception as e:
        print(f"Error parsing the list of lists: {e}")
        return None
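

# Gradio interfaces: one endpoint per extraction task, combined into a tabbed app.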


extractor = URLTextExtractor()


def parse_url(url):
    return extractor.extract_text_from_url(url)


pdf_to_img = gr.Interface(
    convert_pdf_to_image, gr.File(), gr.Gallery(), api_name="pdf_to_img"
)
pdf_to_text = gr.Interface(
    extract_text_from_pdf,
    gr.File(),
    gr.Textbox(placeholder="Extracted text will appear here"),
    api_name="pdf_to_text",
)

doc_or_docx_to_text = gr.Interface(
    extract_text_from_doc_or_docx,
    gr.File(),
    gr.Textbox(placeholder="Extracted text from DOC or DOCX will appear here"),
    api_name="doc_or_docx_to_text",
)

pptx_or_ppt_to_text = gr.Interface(
    extract_text_from_ppt_or_pptx,
    gr.File(),
    gr.Textbox(placeholder="Extracted text from PPT or PPTX will appear here"),
    api_name="pptx_or_ppt_to_text",
)

str_to_json = gr.Interface(
    sanitize_list_of_lists,
    gr.Text(),
    gr.JSON(),
    api_name="str_to_json",
    examples=[
        """[
        ["What year was the Carthaginian Empire founded?", "Around 814 BCE"],
        ["Where was the center of the Carthaginian Empire located?", "Carthage, near present-day Tunis, Tunisia"],
        ["Which powerful ancient republic did Carthage have conflicts with?", "The Roman Republic"],
        ["Fill in the blank: Hannibal famously crossed the ________ with war elephants.", "Alps"],
        ["What were the series of conflicts between Carthage and Rome called?", "The Punic Wars"],
        ["Multiple Choice: What was a significant military advantage of Carthage? A) Strong infantry, B) Powerful navy, C) Fortified cities", "B) Powerful navy"],
        ["In what year was Carthage captured and destroyed by Rome?", "146 BCE"],
        ["What did Carthage excel in that allowed it to amass wealth?", "Maritime trade"]
        ]"""
    ],
)

url_parser = gr.Interface(
    parse_url,
    inputs=["text"],
    outputs=["text"],
    api_name="url_to_text",
)
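

# Sketch of a client call (assumes the `gradio_client` package and that the app is
# reachable at http://localhost:7860):
#   from gradio_client import Client
#   client = Client("http://localhost:7860/")
#   cards = client.predict('[["Question", "Answer"]]', api_name="/str_to_json")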

demo = gr.TabbedInterface(
    [pdf_to_img, pdf_to_text, doc_or_docx_to_text, pptx_or_ppt_to_text, url_parser, str_to_json],
    [
        "PDF to Image",
        "Extract PDF Text",
        "Extract DOC/DOCX Text",
        "Extract PPTX/PPT Text",
        "Extract text from URL",
        "Extract JSON",
    ],
)

# Bind to all interfaces so the app is reachable from outside a container.
demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)
|