import gradio as gr
import pandas as pd
import io
import tempfile
import os
from langchain_community.document_loaders import PyPDFLoader
import nltk
from nltk.tokenize import sent_tokenize
# Download the NLTK punkt sentence-tokenizer data (the "punkt_tab" resource used by newer NLTK releases) if not already present
nltk.download('punkt_tab')
# Create a temporary directory for storing the CSV files offered for download
temp_dir = tempfile.TemporaryDirectory()
def extract_text_with_py_pdf_loader(pdf_file_path, start_page=None, end_page=None):
"""
Extract text from a PDF page by page using LangChain's PyPDFLoader.
Args:
pdf_file_path (str): The file path to the uploaded PDF.
start_page (int, optional): The starting page number for extraction (1-based index).
end_page (int, optional): The ending page number for extraction (1-based index).
Returns:
tuple:
- page_df (pd.DataFrame): DataFrame containing Document, Page, and Text.
- sentence_df (pd.DataFrame): DataFrame containing Document, Page, and Sentence.
"""
try:
# Initialize the loader
loader = PyPDFLoader(pdf_file_path)
        documents = loader.load()  # PyPDFLoader yields one Document per page; load_and_split() may re-chunk long pages and break page numbering
total_pages = len(documents)
doc_name = os.path.basename(pdf_file_path) # Extract document name
# Validate and adjust page range
if start_page is not None and end_page is not None:
# Convert to integers to avoid slicing issues
start_page = int(start_page)
end_page = int(end_page)
# Adjust to valid range
if start_page < 1:
start_page = 1
if end_page > total_pages:
end_page = total_pages
if start_page > end_page:
start_page, end_page = end_page, start_page # Swap if out of order
# Select the subset of documents based on user input
selected_docs = documents[start_page - 1:end_page]
else:
selected_docs = documents
start_page = 1
end_page = total_pages
# Initialize lists to store data
page_data = []
sentence_data = []
        for page_num, doc in enumerate(selected_docs, start=start_page):
text = doc.page_content.strip()
# Append page-wise data
page_data.append({
"Document": doc_name,
"Page": page_num,
"Text": text
})
# Sentence tokenization
sentences = sent_tokenize(text)
for sentence in sentences:
sentence = sentence.strip()
if sentence:
sentence_data.append({
"Document": doc_name,
"Page": page_num,
"Sentence": sentence
})
# Create DataFrames
page_df = pd.DataFrame(page_data)
sentence_df = pd.DataFrame(sentence_data)
return page_df, sentence_df
except Exception as e:
raise RuntimeError(f"Error during PDF extraction: {e}")
def df_to_csv_bytes(df):
"""
Convert DataFrame to CSV in bytes.
Args:
df (pd.DataFrame): The DataFrame to convert.
Returns:
bytes: CSV data in bytes.
"""
try:
buffer = io.StringIO()
df.to_csv(buffer, index=False)
csv_data = buffer.getvalue().encode('utf-8')
buffer.close()
return csv_data
except Exception as e:
raise RuntimeError(f"Error during CSV conversion: {e}")
def on_extract(pdf_file_path, extraction_mode, start_page, end_page):
"""
Callback function to extract text from PDF and return CSV data.
Args:
pdf_file_path (str): The file path to the uploaded PDF.
extraction_mode (str): "All Pages" or "Range of Pages".
        start_page (int): Starting page number for extraction (from the Number input).
        end_page (int): Ending page number for extraction (from the Number input).
Returns:
tuple:
- page_csv_path (str): Path to the page-wise CSV file.
- sentence_csv_path (str): Path to the sentence-wise CSV file.
- status_message (str): Status of the extraction process.
"""
if not pdf_file_path:
return None, None, "No file uploaded."
try:
# Determine page range based on extraction_mode
if extraction_mode == "All Pages":
selected_start = None
selected_end = None
else:
selected_start = start_page
selected_end = end_page
# Extract text and create DataFrames
page_df, sentence_df = extract_text_with_py_pdf_loader(
pdf_file_path,
start_page=selected_start,
end_page=selected_end
)
# Convert DataFrames to CSV bytes
page_csv_bytes = df_to_csv_bytes(page_df)
sentence_csv_bytes = df_to_csv_bytes(sentence_df)
        # Define CSV filenames from the PDF's base name
        base_name = os.path.splitext(os.path.basename(pdf_file_path))[0]
        page_csv_filename = f"{base_name}_pages.csv"
        sentence_csv_filename = f"{base_name}_sentences.csv"
# Define full paths within the temporary directory
page_csv_path = os.path.join(temp_dir.name, page_csv_filename)
sentence_csv_path = os.path.join(temp_dir.name, sentence_csv_filename)
# Write CSV bytes to temporary files
with open(page_csv_path, 'wb') as page_csv_file:
page_csv_file.write(page_csv_bytes)
with open(sentence_csv_path, 'wb') as sentence_csv_file:
sentence_csv_file.write(sentence_csv_bytes)
# Return the paths to the temporary CSV files and a success message
return (
page_csv_path,
sentence_csv_path,
"Extraction successful!"
)
except Exception as e:
return None, None, f"Extraction failed: {e}"
with gr.Blocks() as demo:
gr.Markdown("# 📄 PDF Text Extractor with Multiple Exports")
with gr.Row():
pdf_input = gr.File(
label="Upload PDF",
file_types=[".pdf"],
type="filepath", # Ensure type is set to "filepath"
interactive=True
)
with gr.Row():
extraction_mode = gr.Radio(
label="Extraction Mode",
choices=["All Pages", "Range of Pages"],
value="All Pages",
interactive=True
)
with gr.Row():
start_page = gr.Number(
label="Start Page",
value=1,
precision=0,
interactive=True,
visible=False # Initially hidden
)
end_page = gr.Number(
label="End Page",
value=1,
precision=0,
interactive=True,
visible=False # Initially hidden
)
# Toggle visibility of start_page and end_page based on extraction_mode
extraction_mode.change(
fn=lambda mode: (
gr.update(visible=(mode == "Range of Pages")),
gr.update(visible=(mode == "Range of Pages"))
),
inputs=[extraction_mode],
outputs=[start_page, end_page]
)
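    # Note: the tuple of gr.update(...) values above maps positionally onto
    # `outputs`, so both Number inputs show or hide together on a single event.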
with gr.Row():
extract_button = gr.Button("Extract and Download")
with gr.Row():
page_csv_download = gr.File(
label="Download Page-wise CSV",
interactive=False
)
sentence_csv_download = gr.File(
label="Download Sentence-wise CSV",
interactive=False
)
with gr.Row():
status_output = gr.Textbox(
label="Status",
interactive=False,
lines=2
)
extract_button.click(
fn=on_extract,
inputs=[pdf_input, extraction_mode, start_page, end_page],
outputs=[page_csv_download, sentence_csv_download, status_output]
)
gr.Markdown("""
---
Developed with ❤️ using Gradio and LangChain.
""")
# Launch the Gradio app
demo.queue().launch() |
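# To run locally (assuming this file is saved as app.py and the dependencies
# gradio, pandas, langchain-community, pypdf, and nltk are installed):
#   python app.py
# then open the printed local URL in a browser.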