Amitontheweb committed
Commit 1629dbf · verified · 1 Parent(s): 756904d

Upload app.py

Files changed (1): app.py +94 -26
app.py CHANGED
@@ -5,6 +5,8 @@
  # - https://huggingface.co/EnglishVoice/t5-base-keywords-to-headline?text=diabetic+diet+plan
  # - Apache 2.0
 
+ # In[2]:
+
 
  import torch
  from transformers import T5ForConditionalGeneration,T5Tokenizer
@@ -15,32 +17,83 @@ model = T5ForConditionalGeneration.from_pretrained("EnglishVoice/t5-base-keyword
  tokenizer = T5Tokenizer.from_pretrained("EnglishVoice/t5-base-keywords-to-headline", clean_up_tokenization_spaces=True, legacy=False)
  model = model.to(device)
 
- keywords = "music, sleep, night"
-
- text = "headline: " + keywords
- encoding = tokenizer.encode_plus(text, return_tensors = "pt")
- input_ids = encoding["input_ids"].to(device)
- attention_masks = encoding["attention_mask"].to(device)
- beam_outputs = model.generate(
-     input_ids = input_ids,
-     attention_mask = attention_masks,
-     max_new_tokens = 25,
-     do_sample = True,
-     num_return_sequences = 5,
-     temperature = 1.2,
-     #num_beams = 20,
-     #num_beam_groups = 20,
-     #diversity_penalty=0.8,
-     no_repeat_ngram_size = 3,
-     penalty_alpha = 0.8,
-     #early_stopping = True,
-     top_k = 15,
-     #top_p = 0.60,
- )
-
- for i in range(len(beam_outputs)):
-     result = tokenizer.decode(beam_outputs[i], skip_special_tokens=True)
-     print(result)
+
+
+ # In[5]:
+
+
+ def title_gen(keywords):
+     # Prepend the task prefix the model was trained with.
+     text = "headline: " + keywords
+     encoding = tokenizer.encode_plus(text, return_tensors = "pt")
+     input_ids = encoding["input_ids"].to(device)
+     attention_masks = encoding["attention_mask"].to(device)
+     beam_outputs = model.generate(
+         input_ids = input_ids,
+         attention_mask = attention_masks,
+         max_new_tokens = 30,
+         do_sample = True,
+         num_return_sequences = 5,
+         temperature = 1.2,
+         #num_beams = 20,
+         #num_beam_groups = 20,
+         #diversity_penalty=0.8,
+         no_repeat_ngram_size = 3,
+         penalty_alpha = 0.8,
+         #early_stopping = True,
+         top_k = 15,
+         #top_p = 0.60,
+     )
+
+     titles = ""
+
+     for i in range(len(beam_outputs)):
+         result = tokenizer.decode(beam_outputs[i], skip_special_tokens=True)
+         titles += f"{result}<br>"  # join titles with <br> tags so gr.HTML renders one per line
+
+     return titles
+
+
+ # In[1]:
+
+
+ import gradio as gr
+
+
+ # In[ ]:
+
+
+ iface = gr.Interface(fn=title_gen,
+                      inputs=[gr.Textbox(label="Paste 2 or more keywords separated by a comma.", lines=1)],
+                      outputs=[gr.HTML(label="Titles:")],
+                      title="AI Keywords to Title Generator",
+                      description="Turn keywords into creative suggestions",
+                      article="<div align=left><h1>AI Creative Title Generator</h1><li>With just keywords, generate a list of creative titles.</li><li>Click on Submit to generate more creative and diverse titles.</li><p>AI Model:<br><li>T5 Model trained on a dataset of titles and related keywords</li><li>Original model id: EnglishVoice/t5-base-keywords-to-headline by English Voice AI Labs</li></p><p>Default parameter details:<br><li>Temperature = 1.2, no_repeat_ngram_size=3, top_k = 15, penalty_alpha = 0.8, max_new_tokens = 30</li></div>",
+                      flagging_mode='never'
+                      )
+
+ iface.launch()
+
+
+ # In[ ]:
 
 
  '''
@@ -90,6 +143,21 @@ demo.launch()
  '''
 
 
+ # In[164]:
+
+
+ import gc
+ gc.collect()
+
+
+ # In[166]:
+
+
+ gr.close_all()
+
+
+ # In[ ]:
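For a quick sanity check of the new decoding settings outside Gradio, the same generation call can be run as a standalone script. Below is a minimal sketch using only the model id and parameters from the diff (the "music, sleep, night" keywords come from the deleted inline example). One caveat: in current transformers releases, penalty_alpha enables contrastive search only when do_sample=False, so combined with do_sample=True it likely has no effect and is omitted here.

import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = T5ForConditionalGeneration.from_pretrained("EnglishVoice/t5-base-keywords-to-headline").to(device)
tokenizer = T5Tokenizer.from_pretrained("EnglishVoice/t5-base-keywords-to-headline", legacy=False)

# The model expects a "headline: " task prefix before the comma-separated keywords.
encoding = tokenizer("headline: music, sleep, night", return_tensors="pt").to(device)

outputs = model.generate(
    input_ids=encoding["input_ids"],
    attention_mask=encoding["attention_mask"],
    max_new_tokens=30,       # headlines are short; cap the generated length
    do_sample=True,          # sample instead of greedy decoding for variety
    temperature=1.2,         # >1.0 flattens the distribution for more diverse titles
    top_k=15,                # sample only from the 15 most likely next tokens
    no_repeat_ngram_size=3,  # forbid any 3-gram from repeating within a title
    num_return_sequences=5,  # return five candidate titles per prompt
)

for output in outputs:
    print(tokenizer.decode(output, skip_special_tokens=True))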
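The Gradio wiring itself can be tested without downloading the model by stubbing out the generator. A minimal sketch with a hypothetical fake_title_gen standing in for title_gen; flagging_mode assumes a recent Gradio release (older versions spell it allow_flagging).

import gradio as gr

def fake_title_gen(keywords):
    # Hypothetical stand-in for title_gen: returns five dummy titles
    # joined by <br> tags, which gr.HTML renders one per line.
    return "<br>".join(f"Title {i + 1} for: {keywords}" for i in range(5))

iface = gr.Interface(fn=fake_title_gen,
                     inputs=[gr.Textbox(label="Paste 2 or more keywords separated by a comma.", lines=1)],
                     outputs=[gr.HTML(label="Titles:")],
                     title="AI Keywords to Title Generator",
                     flagging_mode='never')

iface.launch()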
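The trailing notebook cells (import gc; gc.collect() and gr.close_all()) are housekeeping for iterating inside Jupyter: without gr.close_all(), every rerun of iface.launch() leaves another server running. A small sketch of the same cleanup as one reusable cell:

import gc

import gradio as gr

gr.close_all()  # shut down any Gradio servers left over from earlier cells
gc.collect()    # release unreferenced objects before reloading the model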