kevin1911 committed on
Commit
1f47b32
·
verified ·
1 Parent(s): 3e934f0

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +91 -0
app.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
+
4
def load_model(model_name="gpt2"):
    """Build a text-generation pipeline for a causal LM from the Hugging Face Hub.

    Args:
        model_name (str): Hub identifier of the checkpoint to load
            (defaults to the small "gpt2" model).

    Returns:
        transformers.Pipeline: A ready-to-use "text-generation" pipeline
        wrapping the downloaded model and its tokenizer.
    """
    tok = AutoTokenizer.from_pretrained(model_name)
    lm = AutoModelForCausalLM.from_pretrained(model_name)
    return pipeline("text-generation", model=lm, tokenizer=tok)
9
+
10
# Initialize the pipeline outside the function so it's loaded only once:
# the model download/construction happens at import time, and every
# generate_text() call reuses this single pipeline instance.
generator = load_model()
12
+
13
def generate_text(prompt, max_length=100, temperature=1.0, top_p=0.9):
    """
    Generates text based on the prompt using a GPT-2 model.

    Args:
        prompt (str): Input text from the user.
        max_length (int): Max tokens in the prompt + generation.
        temperature (float): Controls randomness (softmax temperature).
        top_p (float): Nucleus sampling hyperparameter in (0, 1].

    Returns:
        str: Generated text from GPT-2 (prompt + continuation).
    """
    results = generator(
        prompt,
        max_length=max_length,
        # Bug fix: without do_sample=True, GPT-2 decodes greedily and the
        # temperature/top_p arguments are silently ignored (transformers
        # only applies them when sampling is enabled).
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        num_return_sequences=1,
        # GPT-2 may not have a dedicated pad token, so eos_token_id used:
        pad_token_id=generator.tokenizer.eos_token_id,
    )
    return results[0]["generated_text"]
34
+
35
# Assemble the Gradio UI: sampling controls on the left, output on the right.
# The block name must stay `demo` — Hugging Face Spaces looks it up by name.
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Educational GPT-2 Demo
        This demo demonstrates how a smaller Large Language Model (GPT-2) predicts text.
        Change the parameters below to see how the model's output is affected:
        - **Max Length** controls the total number of tokens in the output.
        - **Temperature** controls randomness (higher means more creative/chaotic).
        - **Top-p** controls the diversity of tokens (lower means more conservative choices).
        """
    )

    with gr.Row():
        # Input side: the prompt plus the three generation hyperparameters.
        with gr.Column():
            prompt_box = gr.Textbox(
                lines=4,
                label="Prompt",
                placeholder="Type a prompt here",
                value="Once upon a time,",
            )
            length_slider = gr.Slider(
                minimum=20, maximum=200, value=100, step=1, label="Max Length"
            )
            temperature_slider = gr.Slider(
                minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Temperature"
            )
            top_p_slider = gr.Slider(
                minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-p"
            )
            run_button = gr.Button("Generate")

        # Output side: the model's completion.
        with gr.Column():
            result_box = gr.Textbox(label="Generated Text", lines=10)

    # Wire the button to the generation function defined above.
    run_button.click(
        fn=generate_text,
        inputs=[prompt_box, length_slider, temperature_slider, top_p_slider],
        outputs=[result_box],
    )

demo.launch()