Update app.py
Browse files
Streamlit: replace st.cache with st.cache_resource
app.py
CHANGED
@@ -47,19 +47,19 @@ elif input_method == "Write or Paste New Text":
|
|
47 |
st.subheader("Text to Run")
|
48 |
input_text = st.text_area('Write or Paste Text Below', value="", height=128, max_chars=None, key=2)
|
49 |
|
50 |
-
@st.cache
def setModel(model_checkpoint, aggregation):
    """Build and cache a Hugging Face token-classification (NER) pipeline.

    Args:
        model_checkpoint: Model id or local path passed to ``from_pretrained``.
        aggregation: Value forwarded as ``aggregation_strategy`` to the pipeline.

    Returns:
        A transformers ``'ner'`` pipeline built from the checkpoint.
    """
    # NOTE(review): the decorator was truncated to a bare "@st." in this paste
    # (invalid syntax); restored to @st.cache per the commit message
    # ("st.cache --> st.cache_resource") — confirm against the original file.
    model = AutoModelForTokenClassification.from_pretrained(model_checkpoint)
    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
    return pipeline('ner', model=model, tokenizer=tokenizer, aggregation_strategy=aggregation)
|
55 |
|
56 |
-
@st.cache
def get_html(html: str) -> str:
    """Wrap an HTML fragment in a scrollable, bordered container ``div``.

    Newlines in *html* are replaced with spaces so the fragment renders as a
    single flow inside the container.

    Args:
        html: HTML fragment to wrap.

    Returns:
        The wrapped HTML string.
    """
    # NOTE(review): the decorator was truncated to a bare "@st." in this paste
    # (invalid syntax); restored to @st.cache per the commit message — confirm.
    WRAPPER = """<div style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem">{}</div>"""
    html = html.replace("\n", " ")
    return WRAPPER.format(html)
|
61 |
|
62 |
-
@st.
|
63 |
def entity_comb(output):
|
64 |
output_comb = []
|
65 |
for ind, entity in enumerate(output):
|
|
|
47 |
st.subheader("Text to Run")
|
48 |
input_text = st.text_area('Write or Paste Text Below', value="", height=128, max_chars=None, key=2)
|
49 |
|
50 |
+
@st.cache_resource
def setModel(model_checkpoint, aggregation):
    """Create — and cache across Streamlit reruns — an NER pipeline.

    Args:
        model_checkpoint: Model id or local path handed to ``from_pretrained``.
        aggregation: Forwarded as ``aggregation_strategy`` to the pipeline.

    Returns:
        A transformers ``'ner'`` pipeline built from the checkpoint.
    """
    ner_model = AutoModelForTokenClassification.from_pretrained(model_checkpoint)
    ner_tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
    return pipeline(
        'ner',
        model=ner_model,
        tokenizer=ner_tokenizer,
        aggregation_strategy=aggregation,
    )
|
55 |
|
56 |
+
@st.cache_data
def get_html(html: str) -> str:
    """Return *html* wrapped in a scrollable, bordered container ``div``.

    Newlines are collapsed to spaces before formatting so the fragment
    renders as a single flow inside the container.

    Args:
        html: HTML fragment to wrap.

    Returns:
        The wrapped HTML string.
    """
    # This is a pure string computation, so st.cache_data is the appropriate
    # cache primitive; st.cache_resource is intended for global resources
    # (models, DB connections) per the Streamlit caching docs.
    WRAPPER = """<div style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem">{}</div>"""
    html = html.replace("\n", " ")
    return WRAPPER.format(html)
|
61 |
|
62 |
+
@st.cache_resource
|
63 |
def entity_comb(output):
|
64 |
output_comb = []
|
65 |
for ind, entity in enumerate(output):
|