fede97 committed
Commit
8af190d
1 Parent(s): 0c8d65a

Create app.py

Files changed (1)
  1. app.py +115 -0
app.py ADDED
@@ -0,0 +1,115 @@
+ # Run the script and open the link in the browser.
+
+ import os
+ import json
+ import gradio as gr
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+
+ # Scratch checkpoint with the LatBERT tokenizer (kept for reference):
+ # CHECKPOINT_PATH = 'scratch_2-nodes_tokenizer_latbert-original_packing_fcocchi/'
+ CHECKPOINT_PATH = 'itserr/latin_llm_alpha'
+
+ print(f"Loading model from: {CHECKPOINT_PATH}")
+ tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT_PATH, token=os.environ['HF_TOKEN'])
+ model = AutoModelForCausalLM.from_pretrained(CHECKPOINT_PATH, token=os.environ['HF_TOKEN'])
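+
+ # Note: both from_pretrained calls assume a Hugging Face access token is
+ # exported as HF_TOKEN; os.environ['HF_TOKEN'] raises a KeyError otherwise,
+ # so set it before launching the app.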
+
+ description = """
+ This is a Latin Language Model (LLM) based on GPT-2. It was trained on a large corpus of Latin texts and can generate text in Latin. \n
+ Demo instructions:
+ - Enter a prompt in Latin in the Input Text box.
+ - Select the temperature value to control the randomness of the generated text (higher values produce more creative but less stable answers).
+ - Click the 'Generate Text' button to trigger model generation.
+ - (Optional) Enter feedback text in the box.
+ - Click the 'Like' or 'Dislike' button to judge the correctness of the generation.
+ """
+ title = "(L<sup>2</sup>) - Latin Language Model"
+ article = "hello world ..."
+ examples = ['Accidere ex una scintilla', 'Audacter calumniare,', 'Consolatium misero comites']
+ logo_image = 'ITSERR_row_logo.png'
+
+ def generate_text(prompt, slider):
+     if torch.cuda.is_available():
+         device = torch.device("cuda")
+     else:
+         device = torch.device("cpu")
+         print("No GPU available")
+
+     print("***** Generate *****")
+     text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
+     generated_text = text_generator(prompt, max_length=50, do_sample=True, temperature=slider, repetition_penalty=2.0, truncation=True)
+     return generated_text[0]['generated_text']
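+
+ # Note: max_length=50 counts the prompt tokens too, so long prompts leave
+ # little room for new text. Building the pipeline once at module level,
+ # rather than on every click, would avoid re-wrapping the model per request.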
+
+ # Function to handle user preferences
+ def handle_preference(preference, input, output, feedback, temp_value, preferences_file="preferences.json"):
+     """
+     Format of the values stored in preferences:
+     - input text
+     - output generated text
+     - user feedback
+     - float temperature value
+     """
+     if os.path.exists(preferences_file):
+         with open(preferences_file, "r") as file:
+             preferences = json.load(file)
+     else:
+         preferences = {"like": [], "dislike": [], "count_like": 0, "count_dislike": 0}
+
+     if input == output:
+         # Nothing new was generated: store an empty pair.
+         output_tuple = ("", "", feedback, temp_value)
+     else:
+         # Keep only the generated continuation, stripped of the prompt.
+         output_tuple = (input, output.split(input)[-1], feedback, temp_value)
+
+     if preference == "like":
+         preferences["like"].append(output_tuple)
+         if output_tuple[1] != "":
+             preferences["count_like"] += 1
+     elif preference == "dislike":
+         preferences["dislike"].append(output_tuple)
+         if output_tuple[1] != "":
+             preferences["count_dislike"] += 1
+
+     with open(preferences_file, "w") as file:
+         json.dump(preferences, file)
+
+     print(f"Admin log: like: {preferences['count_like']} and dislike: {preferences['count_dislike']}")
+     return f"You selected '{preference}' for this model generation. Thank you for your time!"
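+
+ # Illustrative shape of preferences.json after one 'like' (hypothetical values;
+ # json.dump serializes the tuples as lists):
+ # {"like": [["Accidere ex una scintilla", " magnum excitatur incendium", "", 1.0]],
+ #  "dislike": [], "count_like": 1, "count_dislike": 0}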
+
+ custom_css = """
+ #logo {
+     display: block;
+     margin-left: auto;
+     margin-right: auto;
+     width: 280px;
+     height: 140px;
+ }
+ """
+
+ with gr.Blocks(css=custom_css) as demo:
+     gr.Image(logo_image, elem_id="logo")
+     gr.Markdown(f"<h1 style='text-align: center;'>{title}</h1>")
+     gr.Markdown(description)
+
+     with gr.Row():
+         with gr.Column():
+             input_text = gr.Textbox(lines=5, placeholder="Enter Latin text here...", label="Input Text")
+         with gr.Column():
+             output_text = gr.Textbox(lines=5, placeholder="Output text will appear here...", label="Output Text")
+
+     gr.Examples(examples=examples, inputs=input_text)
+     temperature_slider = gr.Slider(minimum=0.1, maximum=5.0, step=0.1, value=1.0, label="Temperature")
+
+     generate_button = gr.Button("Generate Text")
+     generate_button.click(fn=generate_text, inputs=[input_text, temperature_slider], outputs=output_text)
+     feedback_output = gr.Textbox(lines=1, placeholder="If you want to provide feedback, please fill in this box ...", label="Feedback")
+
+     with gr.Row():
+         like_button = gr.Button("Like")
+         dislike_button = gr.Button("Dislike")
+
+     button_output = gr.Textbox(lines=1, placeholder="Please submit your choice", label="Latin Language Model Demo")
+     like_button.click(fn=lambda x, y, z, v: handle_preference("like", x, y, z, v), inputs=[input_text, output_text, feedback_output, temperature_slider], outputs=button_output)
+     dislike_button.click(fn=lambda x, y, z, v: handle_preference("dislike", x, y, z, v), inputs=[input_text, output_text, feedback_output, temperature_slider], outputs=button_output)
+     # gr.Markdown(article)
+
+ demo.launch(share=True)
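+
+ # To run locally (assuming torch, transformers, and gradio are installed and
+ # the access token is set): HF_TOKEN=<your token> python app.py
+ # share=True additionally prints a temporary public *.gradio.live URL.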