Spaces:
Sleeping
Sleeping
kz209
committed on
Commit
β’
de53991
1
Parent(s):
99a8f27
change from streamlit to gradio
Browse files- app.py +28 -11
- pages/arena.py +7 -19
- pages/summarization_example.py +36 -55
- test.py +57 -0
- utils/multiple_stream.py +46 -20
app.py
CHANGED
@@ -1,18 +1,35 @@
|
|
1 |
-
import
|
2 |
|
3 |
-
|
4 |
-
|
5 |
-
page_icon=":rocket:",
|
6 |
-
)
|
7 |
|
8 |
-
|
|
|
|
|
9 |
|
10 |
-
st.markdown(
|
11 |
-
"""
|
12 |
This application is for **internal use** and is designed to facilitate **fast prototyping** and **experimentation.**
|
13 |
|
14 |
-
|
15 |
"""
|
16 |
-
)
|
17 |
|
18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
|
3 |
+
from pages.arena import create_arena
|
4 |
+
from pages.summarization_example import create_summarization_interface
|
|
|
|
|
5 |
|
6 |
+
def welcome_message():
    """Return the markdown banner shown at the top of the app."""
    banner = """
## Summarization Projects Demo :rocket:

This application is for **internal use** and is designed to facilitate **fast prototyping** and **experimentation.**

Select a demo from the sidebar below to begin experimentation.
"""
    return banner
|
|
|
14 |
|
15 |
+
# Top-level Gradio app: welcome banner plus one tab per demo page.
with gr.Blocks() as demo:
    with gr.Column(scale=4):
        # BUG FIX: the original wrote `content = content = gr.Blocks(gr.Markdown(...))`,
        # a duplicated assignment that nested a second Blocks inside the app.
        # A plain Markdown component is all that is needed here.
        gr.Markdown(welcome_message())

    with gr.Tabs() as tabs:
        with gr.TabItem("Streaming"):
            create_arena()
        with gr.TabItem("Summarization"):
            create_summarization_interface()
        # with gr.TabItem("Page 3"):
        #     page3()


if __name__ == "__main__":
    demo.launch()
|
pages/arena.py
CHANGED
@@ -1,21 +1,9 @@
|
|
1 |
-
import
|
2 |
-
from
|
3 |
|
4 |
-
|
5 |
-
|
6 |
-
|
|
|
7 |
|
8 |
-
|
9 |
-
|
10 |
-
from utils.multiple_stream import stream_data_in_column
|
11 |
-
|
12 |
-
if st.button("Stream data"):
|
13 |
-
# Define layout
|
14 |
-
columns = st.columns(2)
|
15 |
-
|
16 |
-
# Submit concurrent tasks
|
17 |
-
with ThreadPoolExecutor(max_workers=2) as executor:
|
18 |
-
ctx = get_script_run_ctx()
|
19 |
-
futures = [
|
20 |
-
executor.submit(stream_data_in_column, col, ctx) for col in columns
|
21 |
-
]
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from utils.multiple_stream import create_interface
|
3 |
|
4 |
+
def create_arena():
    """Return the streaming-arena demo built by utils.multiple_stream.

    The parent app mounts the result inside a tab; queue()/launch() are
    intentionally left to the caller.
    """
    return create_interface()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
pages/summarization_example.py
CHANGED
@@ -1,10 +1,8 @@
|
|
1 |
from dotenv import load_dotenv
|
2 |
-
from
|
3 |
-
|
4 |
-
import streamlit as st
|
5 |
|
6 |
load_dotenv()
|
7 |
-
st.write('## This is an example to show summarization')
|
8 |
|
9 |
examples = {
|
10 |
"example 1": """Boston's injury reporting for Kristaps PorziΕΔ£is has been fairly coy. He missed Game 3, but his coach told reporters just before Game 4 that was technically available, but with a catch.
|
@@ -21,63 +19,46 @@ All of that has led to postseason averages of 8.2 points, 7.6 rebounds, 1.4 assi
|
|
21 |
Back in Boston, Kidd is going to rely on Lively even more. He'll play close to 30 minutes and reach double-figures in both scoring and rebounding again.""",
|
22 |
}
|
23 |
|
24 |
-
def generate_answer(
|
25 |
meta_prompt = """
|
26 |
{sources}
|
27 |
|
28 |
-
summarization: """
|
29 |
-
content = meta_prompt.format(
|
30 |
-
sources=sources,
|
31 |
-
)
|
32 |
-
|
33 |
-
from transformers import pipeline
|
34 |
|
35 |
-
messages = [
|
36 |
-
{"role": "user", "content": content},
|
37 |
-
]
|
38 |
pipe = pipeline("text-generation", model="microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True, max_length=500)
|
|
|
39 |
answer = pipe(messages)
|
40 |
|
41 |
-
|
42 |
-
# temperature=0.8,
|
43 |
-
# max_tokens=800,
|
44 |
-
# messages=[
|
45 |
-
# {
|
46 |
-
# "role": "user",
|
47 |
-
# "content": content,
|
48 |
-
# },
|
49 |
-
# ],
|
50 |
-
# model=model_name,
|
51 |
-
# )
|
52 |
-
|
53 |
-
return answer
|
54 |
-
|
55 |
-
example_selection = st.selectbox("Choose an example", options=list(examples.keys()), index=0)
|
56 |
-
model_selection = st.selectbox("Choose a model", options=[
|
57 |
-
"gpt-3.5-turbo",
|
58 |
-
"gpt-4o",
|
59 |
-
"gpt-4"
|
60 |
-
], index=0)
|
61 |
-
|
62 |
-
# Input fields
|
63 |
-
input_text1 = st.text_area("question", height=None, \
|
64 |
-
placeholder="Enter first text here...", value=examples[example_selection])
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
# Button to trigger processing
|
69 |
-
#lm = OpenAI()
|
70 |
-
|
71 |
-
if st.button('Submit'):
|
72 |
-
if input_text1:
|
73 |
-
response = generate_answer('', input_text1, model_selection)
|
74 |
-
st.write('## Orginal Article:')
|
75 |
-
st.markdown(examples[example_selection])
|
76 |
-
|
77 |
-
st.write('## Summarization:')
|
78 |
-
st.markdown(response.choices[0].message.content)
|
79 |
|
|
|
|
|
|
|
|
|
80 |
else:
|
81 |
-
|
82 |
-
|
83 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
from dotenv import load_dotenv
|
2 |
+
from transformers import pipeline
|
3 |
+
import gradio as gr
|
|
|
4 |
|
5 |
load_dotenv()
|
|
|
6 |
|
7 |
examples = {
|
8 |
"example 1": """Boston's injury reporting for Kristaps PorziΕΔ£is has been fairly coy. He missed Game 3, but his coach told reporters just before Game 4 that was technically available, but with a catch.
|
|
|
19 |
Back in Boston, Kidd is going to rely on Lively even more. He'll play close to 30 minutes and reach double-figures in both scoring and rebounding again.""",
|
20 |
}
|
21 |
|
22 |
+
def generate_answer(sources, model_name):
    """Summarize *sources* with a local HF text-generation pipeline.

    Args:
        sources: article text to summarize.
        model_name: accepted for interface compatibility with the UI dropdown,
            but currently ignored — the local Phi-3 pipeline is always used.
            TODO(review): honor the selected model or drop the dropdown.

    Returns:
        The pipeline's generated text for the summarization prompt.
    """
    meta_prompt = """
{sources}

summarization: """
    content = meta_prompt.format(sources=sources)

    # PERF FIX: building the pipeline loads the full model from disk; the
    # original recreated it on every request. Cache it on first use instead.
    pipe = getattr(generate_answer, "_pipe", None)
    if pipe is None:
        pipe = pipeline("text-generation", model="microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True, max_length=500)
        generate_answer._pipe = pipe

    messages = [{"role": "user", "content": content}]
    answer = pipe(messages)

    return answer[0]['generated_text']
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
|
35 |
+
def process_input(input_text, model_selection):
    """Render the original article plus its generated summary as markdown."""
    # Guard clause: nothing to summarize without input text.
    if not input_text:
        return "Please fill the input to generate outputs."
    summary = generate_answer(input_text, model_selection)
    return f"## Original Article:\n\n{input_text}\n\n## Summarization:\n\n{summary}"
|
41 |
+
|
42 |
+
def update_input(example):
    """Look up the full article text for the chosen example key."""
    selected_text = examples[example]
    return selected_text
|
44 |
+
|
45 |
+
def create_summarization_interface():
    """Assemble the summarization page: example/model pickers, input box, output."""
    with gr.Blocks() as demo:
        gr.Markdown("## This is an example to show summarization")

        with gr.Row():
            example_choice = gr.Dropdown(choices=list(examples.keys()), label="Choose an example")
            model_choice = gr.Dropdown(choices=["gpt-3.5-turbo", "gpt-4o", "gpt-4"], label="Choose a model", value="gpt-3.5-turbo")

        article_box = gr.Textbox(label="Input Text", lines=10, placeholder="Enter text here...")
        submit = gr.Button("Submit")
        result_md = gr.Markdown()

        # Picking an example fills the input box; Submit runs the summarizer.
        example_choice.change(update_input, inputs=[example_choice], outputs=[article_box])
        submit.click(process_input, inputs=[article_box, model_choice], outputs=[result_md])

    return demo
|
61 |
+
|
62 |
+
if __name__ == "__main__":
    # Standalone entry point for developing this page in isolation.
    create_summarization_interface().launch()
|
test.py
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import numpy as np
|
3 |
+
|
4 |
+
def page1():
    """Demo page: uppercase a piece of text via a button click."""
    with gr.Group():
        gr.Markdown("# Page 1 Content")
        text_in = gr.Textbox(label="Enter some text")
        text_out = gr.Textbox(label="Output")
        run_btn = gr.Button("Process")

        def to_upper(value):
            return value.upper()

        run_btn.click(fn=to_upper, inputs=text_in, outputs=text_out)
|
15 |
+
|
16 |
+
def page2():
    """Demo page: add two numbers."""
    with gr.Group():
        gr.Markdown("# Page 2 Content")
        first = gr.Number(label="Number 1")
        second = gr.Number(label="Number 2")
        total = gr.Number(label="Result")
        add_button = gr.Button("Add")

        def add_pair(x, y):
            return x + y

        add_button.click(fn=add_pair, inputs=[first, second], outputs=total)
|
28 |
+
|
29 |
+
def page3():
    """Demo page: horizontally mirror an uploaded image."""
    with gr.Group():
        gr.Markdown("# Page 3 Content")
        source_img = gr.Image()
        mirrored_img = gr.Image()
        flip_button = gr.Button("Flip Image")

        def mirror(img):
            # No-op when nothing has been uploaded yet.
            if img is None:
                return None
            return np.fliplr(img)

        flip_button.click(fn=mirror, inputs=source_img, outputs=mirrored_img)
|
40 |
+
|
41 |
+
# Scratch layout: a narrow navigation column beside a tabbed content area.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            # Sidebar placeholder
            gr.Markdown("### Navigation")

        with gr.Column(scale=4):
            # Main content area using Tabs
            with gr.Tabs() as tabs:
                with gr.TabItem("Page 1"):
                    page1()
                with gr.TabItem("Page 2"):
                    page2()
                with gr.TabItem("Page 3"):
                    page3()

# BUG FIX: launch() ran unconditionally at import time; guard it so this
# module can be imported (e.g. to reuse page1/page2/page3) without
# starting a server.
if __name__ == "__main__":
    demo.launch()
|
utils/multiple_stream.py
CHANGED
@@ -1,34 +1,60 @@
|
|
1 |
import copy
|
2 |
import random
|
3 |
-
from threading import currentThread
|
4 |
from time import sleep
|
|
|
5 |
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
_TEST_ = """
|
11 |
-
Test of Time. A Benchmark for Evaluating LLMs on Temporal Reasoning. Large language models (LLMs) have \
|
12 |
-
showcased remarkable reasoning capabilities, yet they remain susceptible to errors, particularly in temporal \
|
13 |
-
reasoning tasks involving complex temporal logic.
|
14 |
-
"""
|
15 |
|
16 |
def generate_data_test():
|
17 |
-
"""
|
18 |
-
temp = copy.deepcopy(
|
19 |
l1 = temp.split()
|
20 |
random.shuffle(l1)
|
21 |
temp = ' '.join(l1)
|
22 |
-
|
23 |
for word in temp.split(" "):
|
24 |
yield word + " "
|
25 |
-
sleep(0.1)
|
26 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
27 |
|
28 |
-
|
29 |
-
"""Populate columns simultaneously"""
|
30 |
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
|
|
1 |
import copy
|
2 |
import random
|
|
|
3 |
from time import sleep
|
4 |
+
import gradio as gr
|
5 |
|
6 |
+
# Sample paragraph used as the streaming payload for the demo columns.
TEST = """ Test of Time. A Benchmark for Evaluating LLMs on Temporal Reasoning. Large language models (LLMs) have
showcased remarkable reasoning capabilities, yet they remain susceptible to errors, particularly in temporal
reasoning tasks involving complex temporal logic. """

def generate_data_test():
    """Yield the words of TEST in random order, each with a trailing space.

    Yields:
        str: one shuffled word followed by a single space.
    """
    # FIX: copy.deepcopy on a str is pointless — strings are immutable and
    # split() already returns a fresh list, so shuffle that list directly.
    words = TEST.split()
    random.shuffle(words)
    for word in words:
        yield word + " "
|
|
|
18 |
|
19 |
+
def stream_data(progress=gr.Progress()):
    """Interleave three shuffled word streams, yielding each column's text so far.

    Yields a 3-tuple of accumulated strings after every round in which at
    least one stream produced a new word; stops once all streams are drained.
    """
    feeds = [generate_data_test() for _ in range(3)]
    texts = ["", "", ""]

    while True:
        produced = False
        for idx, feed in enumerate(feeds):
            try:
                texts[idx] += next(feed)
            except StopIteration:
                continue
            else:
                produced = True

        # All generators exhausted — nothing left to stream.
        if not produced:
            break

        yield tuple(texts)
        sleep(0.01)
|
39 |
+
|
40 |
+
def create_interface():
    """Build the three-column streaming demo and return it as a Blocks app.

    BUG FIX: the original built the components inside a bare gr.Group with
    `return demo` commented out, so it returned None — and callers
    (pages.arena.create_arena and the __main__ guard below) then crashed on
    demo.queue()/demo.launch().
    """
    with gr.Blocks() as demo:
        with gr.Group():
            with gr.Row():
                col1 = gr.Textbox(label="Column 1", lines=10)
                col2 = gr.Textbox(label="Column 2", lines=10)
                col3 = gr.Textbox(label="Column 3", lines=10)

            start_btn = gr.Button("Start Streaming")

            # Generator handler: each yielded tuple updates the three columns.
            start_btn.click(
                fn=stream_data,
                outputs=[col1, col2, col3],
                show_progress=False
            )

    return demo
|
|
|
56 |
|
57 |
+
if __name__ == "__main__":
    # Standalone entry point: queue() enables the generator-backed click
    # handler to stream incremental updates to the textboxes.
    demo = create_interface()
    demo.queue()
    demo.launch()
|