import gradio as gr
from pipeline_utils import task_dropdown_choices, handle_task_change, review_training_choices, test_pipeline
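# Gradio playground for trying out Hugging Face pipelines. The Text tab is
# functional; the Image and Audio tabs are still work in progress.
#
# The helpers imported from pipeline_utils are defined elsewhere and not shown
# in this file. Based on how they are called below, they are assumed to behave
# roughly as follows (hypothetical summary, not the module's actual docs):
#   task_dropdown_choices()                  -> choices for the Task dropdown
#   handle_task_change(task)                 -> updates for the context box, model and task dropdowns
#   review_training_choices(choice)          -> visibility update for the "Use Pipeline" row
#   test_pipeline(task, model, prompt, ctx)  -> generated text for the output box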

playground = gr.Blocks()


def create_playground_header():
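    """Render the page title and tagline at the top of the playground."""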
    gr.Markdown("""
                # 🤗 Hugging Face Playground
                **Try your ideas here. Select from Text, Image or Audio**
                """)


def create_playground_footer():
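    """Render documentation and feedback links at the bottom of the page."""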
    gr.Markdown("""
                ### To learn more about 🤗 Hugging Face, [Click Here](https://huggingface.co/docs)
                ### [Click Here](https://huggingface.co/spaces/nsethi610/ns-gradio-apps/discussions/1) to provide feedback or participate in the development of this tool. Let's make AI easy for everyone.
                """)


def create_tabs_header():
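    """Build the mode selector and Test button shared by every tab.

    Returns the radio and the button so each tab can wire its own handlers.
    """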
    with gr.Row():
        with gr.Column(scale=4):
            radio = gr.Radio(
                ["Use Pipeline", "Fine Tune"],
                label="Select Use Pipeline to try out HF models or Fine Tune to test it on your own datasets",
                value="Use Pipeline",
                interactive=True,
            )
        with gr.Column(scale=1):
            test_pipeline_button = gr.Button(
                value="Test", variant="primary", size="sm")
    return radio, test_pipeline_button


with playground:
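    # Assemble the UI: header, one tab per modality, footer.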
    create_playground_header()
    with gr.Tabs():
        with gr.TabItem("Text"):
            radio, test_pipeline_button = create_tabs_header()
            with gr.Row(visible=True) as use_pipeline:
                with gr.Column():
                    task_dropdown = gr.Dropdown(
                        choices=task_dropdown_choices(),
                        label="Task",
                        interactive=True,
                        info="Select Pipelines for natural language processing tasks or     type if you have your own."
                    )
                    model_dropdown = gr.Dropdown(
                        [], label="Model", info="Select appropriate Model based on the task you selected")
                    prompt_textarea = gr.TextArea(
                        label="Prompt",
                        value="Enter your prompt here",
                        text_align="left",
                        info="Copy/Paste or type your prompt to try out. Make sure to provide clear prompt or try with different prompts"
                    )
                    context_for_question_answer = gr.TextArea(
                        label="Context",
                        value="Enter Context for your question here",
                        visible=False,
                        interactive=True,
                        info="Question answering tasks return an answer given a question. If you’ve ever asked a virtual assistant like Alexa, Siri or Google what the weather is, then you’ve used a question answering model before. Here, we are doing Extractive(extract the answer from the given context) Question answering. "
                    )
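                    # handle_task_change is expected to toggle the context box
                    # (shown only for question answering) and refresh the model
                    # choices whenever a new task is selected.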
                    task_dropdown.change(handle_task_change,
                                         inputs=[task_dropdown],
                                         outputs=[context_for_question_answer,
                                                  model_dropdown, task_dropdown])
                with gr.Column():
                    text = gr.TextArea(label="Generated Text")
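            # Switching between "Use Pipeline" and "Fine Tune" shows or hides
            # the pipeline controls via review_training_choices.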
            radio.change(review_training_choices,
                         inputs=radio, outputs=use_pipeline)
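            # Run the selected pipeline with the chosen model, prompt and
            # optional context, and display the result in the output box.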
            test_pipeline_button.click(test_pipeline,
                                       inputs=[
                                           task_dropdown, model_dropdown, prompt_textarea,
                                           context_for_question_answer],
                                       outputs=text)
        with gr.TabItem("Image"):
            radio, test_pipeline_button = create_tabs_header()
            gr.Markdown("""
                        > WIP
                        """)
        with gr.TabItem("Audio"):
            radio, test_pipeline_button = create_tabs_header()
            gr.Markdown("""
                        > WIP
                        """)
    create_playground_footer()
playground.launch(share=True)