Update app.py
app.py CHANGED
@@ -46,7 +46,7 @@ if 'whisper_model' not in st.session_state:
 markdown_text = """
 # 👋🏻Welcome to [Team](https://huggingface.co/TeamTonic) [Tonic](https://huggingface.co/Tonic) 's Patentable Claims Extractor.
 Here you can input audio and text and extract patentable claims from these conversational inputs using [LegalBert](nlpaueb/legal-bert-base-uncased).
-- Save time and effort when ideating for your future business.
+- Save time and effort when ideating for your future business. Expect latency upwards of 2.5 hours !
 """

 # Render the Markdown content
@@ -181,7 +181,7 @@ else:
 # Process each chunk with the BERT-based model
 summaries = []
 for chunk in chunks:
-    summary = bert_legal_model(chunk, min_length=
+    summary = bert_legal_model(chunk, min_length=20, ratio=0.9)
     summaries.append(summary)

 # Now you have a list of summaries for each chunk
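For orientation, here is a minimal sketch of how the updated summarization loop could be wired up. It assumes bert_legal_model is a Summarizer from the bert-extractive-summarizer package built on nlpaueb/legal-bert-base-uncased; the chunk_text helper, the transcript variable, and the surrounding setup are illustrative placeholders, not code taken from app.py.

# Minimal sketch, not the actual app.py: assumes bert_legal_model is a
# bert-extractive-summarizer Summarizer built on nlpaueb/legal-bert-base-uncased.
# chunk_text() and the transcript variable are hypothetical placeholders.
from summarizer import Summarizer
from transformers import AutoConfig, AutoModel, AutoTokenizer

MODEL_NAME = "nlpaueb/legal-bert-base-uncased"

# Expose hidden states so the extractive summarizer can embed sentences.
config = AutoConfig.from_pretrained(MODEL_NAME)
config.output_hidden_states = True
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModel.from_pretrained(MODEL_NAME, config=config)

bert_legal_model = Summarizer(custom_model=model, custom_tokenizer=tokenizer)


def chunk_text(text, max_chars=3000):
    """Naive fixed-size chunking; the real app may split differently."""
    return [text[i:i + max_chars] for i in range(0, len(text), max_chars)]


transcript = "..."  # transcribed audio or pasted conversation text
chunks = chunk_text(transcript)

# Process each chunk with the BERT-based model, mirroring the updated call:
# min_length=20 skips very short sentences, ratio=0.9 keeps ~90% of each chunk.
summaries = []
for chunk in chunks:
    summary = bert_legal_model(chunk, min_length=20, ratio=0.9)
    summaries.append(summary)

full_summary = "\n".join(summaries)

With ratio=0.9 the extractive summarizer retains most sentences of each chunk, so the output is closer to a light trim than an aggressive summary, while min_length=20 keeps very short sentences out of consideration.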