aaliyaan committed
Commit 6f78863 · 1 Parent(s): 7efe136

Initial commit for themed Gradio app

Files changed (2)
  1. app.py +106 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,106 @@
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSeq2SeqLM
from PyPDF2 import PdfReader

# Models and tokenizers setup.
# Bloom is a decoder-only (causal) model, so it must be loaded with
# AutoModelForCausalLM; T5 and T0pp are encoder-decoder (seq2seq) models.
models = {
    "Text Generator (Bloom)": {
        "model": AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m"),
        "tokenizer": AutoTokenizer.from_pretrained("bigscience/bloom-560m"),
    },
    "PDF Summarizer (T5)": {
        "model": AutoModelForSeq2SeqLM.from_pretrained("t5-small"),
        "tokenizer": AutoTokenizer.from_pretrained("t5-small"),
    },
    # Note: T0pp is an 11B-parameter checkpoint and needs substantial memory to load.
    "Broken Answer (T0pp)": {
        "model": AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp"),
        "tokenizer": AutoTokenizer.from_pretrained("bigscience/T0pp"),
    },
}

# Function for text generation
def generate_text(model_choice, input_text, max_tokens, temperature, top_p):
    model_info = models[model_choice]
    tokenizer = model_info["tokenizer"]
    model = model_info["model"]

    inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True, max_length=512)
    # do_sample=True is required for temperature and top_p to have any effect;
    # max_length is cast to int because Gradio sliders may return floats.
    outputs = model.generate(
        **inputs,
        max_length=int(max_tokens),
        num_beams=5,
        early_stopping=True,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Function for PDF summarization
def summarize_pdf(pdf_file, max_tokens, temperature, top_p):
    reader = PdfReader(pdf_file)
    text = ""
    for page in reader.pages:
        # extract_text() can return None for pages without extractable text.
        text += page.extract_text() or ""

    model_info = models["PDF Summarizer (T5)"]
    tokenizer = model_info["tokenizer"]
    model = model_info["model"]

    inputs = tokenizer("summarize: " + text, return_tensors="pt", padding=True, truncation=True, max_length=512)
    outputs = model.generate(
        **inputs,
        max_length=int(max_tokens),
        num_beams=5,
        early_stopping=True,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Build Gradio interface
def launch_custom_app():
    with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
        gr.Markdown("<h1 style='text-align: center;'>💡 Multi-Model Assistant</h1>")
        gr.Markdown("<p style='text-align: center;'>Switch between text generation, PDF summarization, or quirky broken answers!</p>")

        with gr.Tabs():
            # Tab for text generation
            with gr.Tab("Text Generator"):
                model_choice = gr.Dropdown(choices=list(models.keys()), label="Choose a Model", value="Text Generator (Bloom)")
                input_text = gr.Textbox(label="Enter Text")
                max_tokens = gr.Slider(minimum=10, maximum=512, value=150, step=10, label="Max Tokens")
                temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature")
                top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
                output_text = gr.Textbox(label="Generated Text", interactive=False)
                generate_button = gr.Button("Generate Text")

                generate_button.click(
                    generate_text,
                    inputs=[model_choice, input_text, max_tokens, temperature, top_p],
                    outputs=output_text,
                )

            # Tab for PDF summarization
            with gr.Tab("PDF Summarizer"):
                pdf_file = gr.File(label="Upload a PDF File", file_types=[".pdf"])
                max_tokens_pdf = gr.Slider(minimum=10, maximum=512, value=150, step=10, label="Max Tokens")
                temperature_pdf = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature")
                top_p_pdf = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
                summary_output = gr.Textbox(label="PDF Summary", interactive=False)
                summarize_button = gr.Button("Summarize PDF")

                summarize_button.click(
                    summarize_pdf,
                    inputs=[pdf_file, max_tokens_pdf, temperature_pdf, top_p_pdf],
                    outputs=summary_output,
                )

            # Tab for the intentionally "broken" model
            with gr.Tab("Broken Answers"):
                broken_input = gr.Textbox(label="Enter Text")
                broken_max_tokens = gr.Slider(minimum=10, maximum=512, value=150, step=10, label="Max Tokens")
                broken_temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature")
                broken_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
                broken_output = gr.Textbox(label="Broken Model Output", interactive=False)
                broken_button = gr.Button("Generate Broken Answer")

                # Reuse generate_text with the T0pp entry fixed as the model choice.
                broken_button.click(
                    lambda text, max_tokens, temp, top_p: generate_text("Broken Answer (T0pp)", text, max_tokens, temp, top_p),
                    inputs=[broken_input, broken_max_tokens, broken_temperature, broken_top_p],
                    outputs=broken_output,
                )

    demo.launch()

# Launch the app
launch_custom_app()
requirements.txt ADDED
@@ -0,0 +1,3 @@
gradio
transformers
PyPDF2