"""Notes generator: summarize text with T5-base and serve it via a Gradio UI.

Two entry points are exposed as a tabbed Gradio app:
  * "Text"        -- paste a paragraph (>150 words) plus a title.
  * "Upload File" -- upload a *.txt file (>150 words) plus a title.
Both run the same T5 summarization pipeline and return formatted notes.
"""

import glob     # NOTE(review): unused in this file; kept for compatibility
import os.path  # NOTE(review): unused in this file; kept for compatibility
import random
import re
from textwrap import wrap  # stdlib textwrap replaces the textwrap3 backport

import gradio as gr
import nltk
import numpy as np
import pandas as pd  # NOTE(review): unused in this file; kept for compatibility
import torch
from nltk.corpus import wordnet as wn  # noqa: F401 -- kept: original imported it
from nltk.tokenize import sent_tokenize
from transformers import T5ForConditionalGeneration, T5Tokenizer

# ---------------------------------------------------------------------------
# Demo text used for a smoke run of the summarizer at import time.
# ---------------------------------------------------------------------------
text = """A Lion lay asleep in the forest, his great head resting on his paws. A timid little Mouse came upon him unexpectedly, and in her fright and haste to get away, ran across the Lion's nose. Roused from his nap, the Lion laid his huge paw angrily on the tiny creature to kill her. "Spare me!" begged the poor Mouse. "Please let me go and some day I will surely repay you." The Lion was much amused to think that a Mouse could ever help him. But he was generous and finally let the Mouse go. Some days later, while stalking his prey in the forest, the Lion was caught in the toils of a hunter's net. Unable to free himself, he filled the forest with his angry roaring. The Mouse knew the voice and quickly found the Lion struggling in the net. Running to one of the great ropes that bound him, she gnawed it until it parted, and soon the Lion was free. "You laughed when I said I would repay you," said the Mouse. "Now you see that even a Mouse can help a Lion." """

for wrp in wrap(text, 150):
    print(wrp)
print("\n")

# ---------------------------------------------------------------------------
# Model / tokenizer setup (done once, at import time).
# ---------------------------------------------------------------------------
summary_model = T5ForConditionalGeneration.from_pretrained('t5-base')
summary_tokenizer = T5Tokenizer.from_pretrained('t5-base')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
summary_model = summary_model.to(device)


def set_seed(seed: int):
    """Seed python, numpy and torch (CPU + all GPUs) for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


set_seed(42)

# Corpora/tokenizer data required by nltk.sent_tokenize and wordnet.
# NOTE(review): recent nltk releases also need 'punkt_tab' for sent_tokenize
# -- confirm against the pinned nltk version.
nltk.download('punkt')
nltk.download('brown')
nltk.download('wordnet')


def postprocesstext(content):
    """Capitalize each sentence of *content* and join them, newline-separated.

    Each sentence ends up on its own line (a leading and trailing "\\n" is
    appended around every sentence, matching the original formatting).
    """
    final = ""
    for sent in sent_tokenize(content):
        sent = sent.capitalize()
        final = final + "\n" + sent + "\n"
    return final


def summarizer(text, model, tokenizer):
    """Summarize *text* with seq2seq *model*; return post-processed notes.

    The input gets the T5 "summarize: " task prefix, is truncated to 512
    tokens, and is decoded with beam search (3 beams, no repeated bigrams,
    75-300 output tokens).
    """
    text = "summarize: " + text.strip()
    max_len = 512
    encoding = tokenizer.encode_plus(
        text,
        max_length=max_len,
        padding=False,  # replaces deprecated pad_to_max_length=False
        truncation=True,
        return_tensors="pt",
    ).to(device)
    input_ids, attention_mask = encoding["input_ids"], encoding["attention_mask"]
    outs = model.generate(
        input_ids=input_ids,
        attention_mask=attention_mask,
        early_stopping=True,
        num_beams=3,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        min_length=75,
        max_length=300,
    )
    dec = [tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]
    summary = dec[0]
    print(summary)
    summary = postprocesstext(summary)
    return summary.strip()


# Smoke run: summarize the demo text and print original vs. summary.
summarized_text = summarizer(text, summary_model, summary_tokenizer)

print("\noriginal Text >>")
for wrp in wrap(text, 150):
    print(wrp)
print("\n")
print("Summarized Text >>")
for wrp in wrap(summarized_text, 150):
    print(wrp)
print("\n")

"""# **UI by using Gradio**"""


def _word_count(s):
    """Number of word tokens (runs of \\w+) in *s*."""
    return len(re.findall(r'\w+', s))


def _make_notes(content):
    """Summarize *content* and prepend the notes header (shared by both UIs)."""
    summary_text = summarizer(content, summary_model, summary_tokenizer)
    for wrp in wrap(summary_text, 150):
        print(wrp)
    return "Notes and key points of the topic are:\n" + summary_text


# --- Tab 1: paste text directly --------------------------------------------
context = gr.Textbox(
    lines=10,
    placeholder="Enter paragraph/content here...",
    label="Enter your content (words input must be more than 150 words).",
)
subject = gr.Textbox(
    placeholder="Enter subject/title here...",
    label="Enter your title (title must contain 1 word)",
)
output = gr.Markdown(label="Notes")


def generate_question_text(context, subject):
    """Gradio callback: validate pasted text + title, return generated notes.

    Raises gr.Error when the text has fewer than 150 words or the title is
    empty.
    """
    if _word_count(context) < 150:
        raise gr.Error("Invalid Input (Words limit must be more than 150 words).")
    if _word_count(subject) < 1:
        raise gr.Error("Invalid Input (Title must be one or more than one word).")
    return _make_notes(context)


iface = gr.Interface(
    fn=generate_question_text,
    inputs=[context, subject],
    outputs=[output],
    allow_flagging="never",
    flagging_options=["Save Data"],
)


def generate_question(context, subjectfile):
    """Generate notes for *context*; *subjectfile* is accepted but unused."""
    return _make_notes(context)


# --- Tab 2: upload a .txt file ---------------------------------------------
def filecreate(x, subjectfile):
    """Gradio callback: read uploaded file *x*, validate, return notes.

    Raises gr.Error when the file has fewer than 150 words or the title is
    empty.
    """
    with open(x.name) as fo:
        text = fo.read()
    if _word_count(text) < 150:
        raise gr.Error("Invalid Input (Words limit must be more than 150 words).")
    if _word_count(subjectfile) < 1:
        raise gr.Error("Invalid Input (Title must be one or more than one word).")
    # BUG FIX: the original passed the module-level `subject` gr.Textbox
    # component here instead of the user's title string.
    return generate_question(text, subjectfile)


context = gr.HTML(label="Text")
subjectfile = gr.Textbox(
    placeholder="Enter subject/title here...",
    label="Enter your title (title must contain 1 word).",
)
file = gr.File(label="Upload your *.txt file (File must contain more than 150 words).")

fface = gr.Interface(
    fn=filecreate,
    inputs=[file, subjectfile],
    outputs=context,
    allow_flagging="never",
    flagging_options=["Save Data"],
)

demo = gr.TabbedInterface([iface, fface], ["Text", "Upload File"])
demo.launch(debug=True, show_api=False)