# NOTE(review): the six lines below are web-scrape residue from the Hugging
# Face Spaces page header (status text, file size, commit hashes, line-number
# gutter). Preserved here as comments so the module remains valid Python;
# they should be removed from the repository copy.
# Spaces:
# Sleeping
# Sleeping
# File size: 1,520 Bytes
# 1921336 42c830b 9a1ab03 de53991 34ffea3 1921336 9dfac6e 1921336 9a1ab03 de53991 1921336 |
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
#from utils.multiple_stream import create_interface
import random
import gradio as gr
import json
import logging
import gc
import torch
from utils.data import dataset
from utils.multiple_stream import stream_data
from pages.summarization_playground import get_model_batch_generation
def create_arena():
    """Build a Gradio Blocks demo that streams one summary per prompt.

    Loads the prompt list from ``prompt/prompt.json``, picks a single random
    example from ``dataset`` at build time (fixed for the lifetime of the
    interface), and wires a Submit button that streams batch-generated
    summaries — one output textbox per prompt — from the Qwen2 model.

    Returns:
        gr.Blocks: the assembled (not yet launched) Gradio interface.
    """
    # json.load parses straight from the file handle (no intermediate string);
    # explicit encoding avoids the platform-dependent default of open().
    with open("prompt/prompt.json", "r", encoding="utf-8") as file:
        prompts = json.load(file)

    with gr.Blocks() as demo:
        with gr.Group():
            # One random example, chosen once when the UI is built.
            datapoint = random.choice(dataset)
            datapoint = datapoint['section_text'] + '\n\nDialogue:\n' + datapoint['dialogue']
            submit_button = gr.Button("✨ Submit ✨")
            with gr.Row():
                columns = [gr.Textbox(label=f"Prompt {i+1}", lines=10) for i in range(len(prompts))]

        # Each prompt is combined with the same datapoint to form the
        # generation inputs, kept in the same order as `columns`.
        content_list = [prompt + '\n{' + datapoint + '}\n\nsummary:' for prompt in prompts]
        model = get_model_batch_generation("Qwen/Qwen2-1.5B-Instruct")

        def start_streaming():
            # Fan each partial batch result out to its matching textbox;
            # yielding a tuple updates all outputs at once.
            for data in stream_data(content_list, model):
                yield tuple(gr.update(value=data[i]) for i in range(len(columns)))

        submit_button.click(
            fn=start_streaming,
            inputs=[],
            outputs=columns,
            show_progress=False
        )

    return demo
def main():
    """Script entry point: build the arena UI and serve it."""
    app = create_arena()
    app.queue()   # enable request queueing so streaming outputs work
    app.launch()


if __name__ == "__main__":
    main()
# |  <- trailing scrape artifact, commented out to keep the file parseable