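"""Gradio demo: run label-probability predictions with one or more model variants,
either on a single input sentence or on an uploaded CSV file."""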
import os
from datetime import datetime

import gradio as gr
import pandas as pd
import torch

# Local module providing model_factory, predict_single, predict_csv and LABEL_COLUMNS.
from Prediction import *


examples = []
if os.path.exists("assets/examples.txt"):
    with open("assets/examples.txt", "r", encoding="utf8") as file:
        for sentence in file:
            sentence = sentence.strip()
            if sentence:
                examples.append(sentence)
else:
    examples = [
        "Games of the imagination teach us actions have consequences in a realm that can be reset.",
        "But New Jersey farmers are retiring and all over the state, development continues to push out dwindling farmland.",
        "He also is the Head Designer of The Design Trust so-to-speak, besides his regular job ..."
    ]

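# Run inference on CPU; `manager` maps each model name to a dict holding its 'model' and 'tokenizer'.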
device = torch.device('cpu')
manager = model_factory("./models", device)


def single_sentence(sentence, model_select):
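    """Predict label probabilities for `sentence` with each selected model.

    Returns one row per model: the model name followed by its predictions.
    """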
    df = []
    for model_name in model_select:
        dct = manager[model_name]
        model, tokenizer = dct['model'], dct['tokenizer']
        predictions = predict_single(sentence, tokenizer, model, device)
        df.append([model_name] + predictions)
    return df

def csv_process(csv_file, model_select, attr="content"):
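    """Run predictions on the `attr` column of the uploaded CSV with each selected
    model, write one timestamped CSV per model under output/, and return the paths.
    """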
    current_time = datetime.now()
    formatted_time = current_time.strftime("%Y_%m_%d_%H_%M_%S")
    df = pd.read_csv(csv_file.name)
    os.makedirs('output', exist_ok=True)
    outputs = []
    for model_name in model_select:
        data = df.copy(deep=True)
        dct = manager[model_name]
        model, tokenizer = dct['model'], dct['tokenizer']
        predictions = predict_csv(data, attr, tokenizer, model, device)
        output_path = f"output/prediction_{model_name}_{formatted_time}.csv"
        predictions.to_csv(output_path)
        outputs.append(output_path)
    return outputs


my_theme = gr.Theme.from_hub("JohnSmith9982/small_and_pretty")
with gr.Blocks(theme=my_theme, title='XXX') as demo:
    gr.HTML(
        """
        <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
        <a href="https://github.com/xxx" style="margin-right: 20px; text-decoration: none; display: flex; align-items: center;">
        </a>
        <div>
            <h1>Place the title of the paper here</h1>
            <h5 style="margin: 0;">If you like our project, please give us a star ✨ on GitHub for the latest updates.</h5>
            <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
                <a href="https://arxiv.org/abs/xx.xx"><img src="https://img.shields.io/badge/Arxiv-xx.xx-red"></a>
                <a href='https://huggingface.co/spaces/cheesexuebao/murphy'><img src='https://img.shields.io/badge/Project_Page-Murphy/xxBert' alt='Project Page'></a>
                <a href='https://github.com'><img src='https://img.shields.io/badge/Github-Code-blue'></a>
            </div>
        </div>
        </div>
        """)

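    # Tab 1: predictions for a single sentence across the selected model variants.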
    with gr.Tab("Single Sentence"):
        with gr.Row():
            tbox_input = gr.Textbox(label="Input",
                                    info="Enter a sentence here.")
            model_select = gr.CheckboxGroup(manager.keys(),
                                            label="Models:",
                                            info="Select one or more model variants to obtain their predictions.")
        tab_output = gr.DataFrame(label='Probability Predictions:', 
                                  headers=["model"] + LABEL_COLUMNS,
                                  datatype=["str"] * (len(LABEL_COLUMNS)+1),
                                  interactive=False, 
                                  wrap=True)
        with gr.Row():
            button_ss = gr.Button("Submit", variant="primary")
            button_ss.click(fn=single_sentence, inputs=[tbox_input, model_select], outputs=[tab_output])
            gr.ClearButton([tbox_input, tab_output])

        gr.Markdown("## Examples")
        gr.Examples(
            examples=examples,
            inputs=tbox_input,
            examples_per_page=5
        )

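    # Tab 2: batch predictions over an uploaded CSV; one output file per selected model.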
    with gr.Tab("Csv File"):
        with gr.Row():
            csv_input = gr.File(label="CSV File:",
                                file_types=['.csv'],
                                file_count="single"
                                )
            csv_output = gr.File(label="Predictions:")

        model_select_csv = gr.CheckboxGroup(
            manager.keys(),
            label="Models:",
            info="Select one or more model variants to obtain their predictions.",
        )

        with gr.Row():
            button = gr.Button("Submit", variant="primary")
            button.click(fn=csv_process, inputs=[csv_input, model_select_csv], outputs=[csv_output])
            gr.ClearButton([csv_input, csv_output])

        gr.Markdown("## Examples")
        gr.Examples(
            examples=["assets/csv_examples.csv",],
            inputs=csv_input
        )

    with gr.Tab("Readme"):
        gr.Markdown(
            """
            # Paper Name

            # Authors

            + First author
            + Corresponding author
            
            # Detailed Information

            ...
            """
        )
demo.launch()