initial model uploaded
Browse files- README.md +89 -0
- config.json +51 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- spiece.model +3 -0
- tokenizer_config.json +1 -0
README.md
ADDED
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
pipeline_tag: text2text-generation
|
3 |
+
tags:
|
4 |
+
- paraphrasing
|
5 |
+
language: en
|
6 |
+
license: apache-2.0
|
7 |
+
---
|
8 |
+
|
9 |
+
## Model in Action 🚀
|
10 |
+
|
11 |
+
```python
|
12 |
+
import torch
|
13 |
+
from transformers import T5ForConditionalGeneration,T5Tokenizer
|
14 |
+
|
15 |
+
|
16 |
+
def set_seed(seed):
    """Make sampling reproducible: seed torch's CPU RNG and, when CUDA
    is available, the RNGs of every GPU device."""
    torch.manual_seed(seed)
    if not torch.cuda.is_available():
        return
    torch.cuda.manual_seed_all(seed)
|
20 |
+
|
21 |
+
set_seed(42)

# Load the fine-tuned T5 paraphraser and its SentencePiece tokenizer.
model = T5ForConditionalGeneration.from_pretrained('valurank/t5-paraphraser')
tokenizer = T5Tokenizer.from_pretrained('valurank/t5-paraphraser')

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device ", device)
model = model.to(device)

sentence = "Which course should I take to get started in data science?"
# sentence = "What are the ingredients required to bake a perfect cake?"
# sentence = "What is the best possible approach to learn aeronautical engineering?"
# sentence = "Do apples taste better than oranges in general?"

# T5 expects a task prefix. The tokenizer appends the EOS token (</s>)
# itself, so it must not be added to the text by hand.
text = "paraphrase: " + sentence

max_len = 256

# `pad_to_max_length` is deprecated and `max_len` was previously unused:
# pass an explicit max_length together with padding/truncation instead.
encoding = tokenizer.encode_plus(
    text,
    max_length=max_len,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)
input_ids = encoding["input_ids"].to(device)
attention_masks = encoding["attention_mask"].to(device)

# Top-k / top-p (nucleus) sampling with top_k=120, top_p=0.98 and 10
# candidate paraphrases. `early_stopping` only applies to beam search,
# so it is omitted when sampling.
beam_outputs = model.generate(
    input_ids=input_ids,
    attention_mask=attention_masks,
    do_sample=True,
    max_length=max_len,
    top_k=120,
    top_p=0.98,
    num_return_sequences=10,
)

print("\nOriginal Question ::")
print(sentence)
print("\n")
print("Paraphrased Questions :: ")

# Keep only outputs that differ from the input (case-insensitive) and
# drop duplicates while preserving generation order.
final_outputs = []
for beam_output in beam_outputs:
    sent = tokenizer.decode(
        beam_output,
        skip_special_tokens=True,
        clean_up_tokenization_spaces=True,
    )
    if sent.lower() != sentence.lower() and sent not in final_outputs:
        final_outputs.append(sent)

for i, final_output in enumerate(final_outputs):
    print("{}: {}".format(i, final_output))
69 |
+
|
70 |
+
```
|
71 |
+
## Output
|
72 |
+
```
|
73 |
+
Original Question ::
|
74 |
+
Which course should I take to get started in data science?
|
75 |
+
|
76 |
+
|
77 |
+
Paraphrased Questions ::
|
78 |
+
0: What should I learn to become a data scientist?
|
79 |
+
1: How do I get started with data science?
|
80 |
+
2: How would you start a data science career?
|
81 |
+
3: How can I start learning data science?
|
82 |
+
4: How do you get started in data science?
|
83 |
+
5: What's the best course for data science?
|
84 |
+
6: Which course should I start with for data science?
|
85 |
+
7: What courses should I follow to get started in data science?
|
86 |
+
8: What degree should be taken by a data scientist?
|
87 |
+
9: Which course should I follow to become a Data Scientist?
|
88 |
+
```
|
89 |
+
|
config.json
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"architectures": [
|
3 |
+
"T5ForConditionalGeneration"
|
4 |
+
],
|
5 |
+
"d_ff": 3072,
|
6 |
+
"d_kv": 64,
|
7 |
+
"d_model": 768,
|
8 |
+
"decoder_start_token_id": 0,
|
9 |
+
"dropout_rate": 0.1,
|
10 |
+
"eos_token_id": 1,
|
11 |
+
"initializer_factor": 1.0,
|
12 |
+
"is_encoder_decoder": true,
|
13 |
+
"layer_norm_epsilon": 1e-06,
|
14 |
+
"model_type": "t5",
|
15 |
+
"n_positions": 512,
|
16 |
+
"num_heads": 12,
|
17 |
+
"num_layers": 12,
|
18 |
+
"output_past": true,
|
19 |
+
"pad_token_id": 0,
|
20 |
+
"relative_attention_num_buckets": 32,
|
21 |
+
"task_specific_params": {
|
22 |
+
"summarization": {
|
23 |
+
"early_stopping": true,
|
24 |
+
"length_penalty": 2.0,
|
25 |
+
"max_length": 200,
|
26 |
+
"min_length": 30,
|
27 |
+
"no_repeat_ngram_size": 3,
|
28 |
+
"num_beams": 4,
|
29 |
+
"prefix": "summarize: "
|
30 |
+
},
|
31 |
+
"translation_en_to_de": {
|
32 |
+
"early_stopping": true,
|
33 |
+
"max_length": 300,
|
34 |
+
"num_beams": 4,
|
35 |
+
"prefix": "translate English to German: "
|
36 |
+
},
|
37 |
+
"translation_en_to_fr": {
|
38 |
+
"early_stopping": true,
|
39 |
+
"max_length": 300,
|
40 |
+
"num_beams": 4,
|
41 |
+
"prefix": "translate English to French: "
|
42 |
+
},
|
43 |
+
"translation_en_to_ro": {
|
44 |
+
"early_stopping": true,
|
45 |
+
"max_length": 300,
|
46 |
+
"num_beams": 4,
|
47 |
+
"prefix": "translate English to Romanian: "
|
48 |
+
}
|
49 |
+
},
|
50 |
+
"vocab_size": 32128
|
51 |
+
}
|
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:47909fb9fa11cf95db9c501c120ff2212cd6c89cf80e00e811cd6239d31d9719
|
3 |
+
size 891691413
|
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
|
spiece.model
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
|
3 |
+
size 791656
|
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"model_max_length": 512}
|