|
|
|
|
|
|
|
|
|
|
|
import transformers |
|
import gradio as gr |
|
import torch |
|
|
|
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM |
|
|
|
# Load the T5-based paraphrasing model and its tokenizer from the HuggingFace Hub.
# NOTE: this downloads weights on first run and loads them at import time, so
# startup requires network access and enough RAM for a T5-base checkpoint.
tokenizer = AutoTokenizer.from_pretrained("humarin/chatgpt_paraphraser_on_T5_base")

model = AutoModelForSeq2SeqLM.from_pretrained("humarin/chatgpt_paraphraser_on_T5_base")
|
|
|
def paraphrase(
    Content_to_Rephrase,
    num_beams=5,
    num_beam_groups=5,
    num_return_sequences=5,
    repetition_penalty=10.0,
    diversity_penalty=3.0,
    no_repeat_ngram_size=2,
    temperature=0.7,
    max_length=128
):
    """Generate three paraphrases of the input text using diverse beam search.

    Args:
        Content_to_Rephrase: Text to rewrite (prefixed with the model's
            "paraphrase:" task prompt before encoding).
        num_beams: Total beams for beam search.
        num_beam_groups: Beam groups for *diverse* beam search (must divide
            num_beams; diversity_penalty only applies with groups > 1).
        num_return_sequences: Candidate sequences to decode (must be >= 3,
            since the first three are returned).
        repetition_penalty: Penalty discouraging repeated tokens.
        diversity_penalty: Penalty pushing beam groups apart.
        no_repeat_ngram_size: Forbid repeating n-grams of this size.
        temperature: Sampling temperature (creativity level).
        max_length: Max token length for both input truncation and output.

    Returns:
        Tuple of the first three decoded paraphrase candidates.
    """
    input_ids = tokenizer(
        f'paraphrase: {Content_to_Rephrase}',
        return_tensors="pt", padding="longest",
        max_length=max_length,
        truncation=True,
    ).input_ids

    outputs = model.generate(
        input_ids, temperature=temperature, repetition_penalty=repetition_penalty,
        num_return_sequences=num_return_sequences, no_repeat_ngram_size=no_repeat_ngram_size,
        num_beams=num_beams, num_beam_groups=num_beam_groups,
        max_length=max_length, diversity_penalty=diversity_penalty
    )

    res = tokenizer.batch_decode(outputs, skip_special_tokens=True)

    # Bug fix: the original returned res[0], res[1], res[3] — skipping the
    # third-best candidate at index 2 — and assigned an unused res[4].
    # Return the top three candidates in order instead.
    return res[0], res[1], res[2]
|
|
|
# Output widgets: one textbox per paraphrase candidate returned by paraphrase().
# NOTE(review): a placeholder on an *output* textbox is unusual — it only shows
# before the first submission; confirm it is intentional.
output1 = gr.Textbox(label="Rephrased: Option 1", placeholder="Type or paste here..")
output2 = gr.Textbox(label="Rephrased: Option 2")
output3 = gr.Textbox(label="Rephrased: Option 3")

# Wire the paraphrase function to a simple one-input, three-output web UI.
# Bug fix in `description`: the original HTML read `<li list-style: square inside;>`,
# i.e. raw CSS dropped into the attribute position without a `style=` attribute —
# invalid HTML that browsers silently ignore. Wrapped it in a proper style attribute.
iface = gr.Interface(
    fn=paraphrase,
    inputs=["text"],
    outputs=[output1, output2, output3],
    title="AI Paraphraser",
    description="<h3>How to use the content rephraser / re-writer</h3><br><ul><li style='list-style: square inside;'>Paste text in the input box and press 'Submit'.</li><li>Input only short pieces of content</li><li>Creativity level : Medium (Temperature value = 0.7)</li><li>The rephrased sentences / paragraphs *may not* be better than the original input.</li></ul>",
    examples=[
        ["The sun rises in the morning."],
        ["Rephrasing sentences can throw up surprising results."],
        ["The weather is getting more and more unpredictable these days."],
        ["AI content generation is easy, but good prompt instructions generate better content."],
    ],
    # Pre-compute example outputs at startup so clicking an example is instant.
    cache_examples=True,
)

iface.launch()