Update README.md (#2)
- Update README.md (commit 78a84c01cf1ecfb297f9e97b4a1ee357e2169051)
README.md
CHANGED
|
@@ -158,7 +158,7 @@ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-xl")
|
|
| 158 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl")
|
| 159 |
|
| 160 |
input_text = "translate English to German: How old are you?"
|
| 161 |
-
input_ids = tokenizer(input_text, return_tensors="pt")
|
| 162 |
|
| 163 |
outputs = model.generate(input_ids)
|
| 164 |
print(tokenizer.decode(outputs[0]))
|
|
@@ -179,7 +179,7 @@ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-xl")
|
|
| 179 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl", device_map="auto")
|
| 180 |
|
| 181 |
input_text = "translate English to German: How old are you?"
|
| 182 |
-
input_ids = tokenizer(input_text, return_tensors="pt")
|
| 183 |
|
| 184 |
outputs = model.generate(input_ids)
|
| 185 |
print(tokenizer.decode(outputs[0]))
|
|
@@ -203,7 +203,7 @@ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-xl")
|
|
| 203 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl", device_map="auto", torch_dtype=torch.float16)
|
| 204 |
|
| 205 |
input_text = "translate English to German: How old are you?"
|
| 206 |
-
input_ids = tokenizer(input_text, return_tensors="pt")
|
| 207 |
|
| 208 |
outputs = model.generate(input_ids)
|
| 209 |
print(tokenizer.decode(outputs[0]))
|
|
@@ -224,7 +224,7 @@ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-xl")
|
|
| 224 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl", device_map="auto", load_in_8bit=True)
|
| 225 |
|
| 226 |
input_text = "translate English to German: How old are you?"
|
| 227 |
-
input_ids = tokenizer(input_text, return_tensors="pt")
|
| 228 |
|
| 229 |
outputs = model.generate(input_ids)
|
| 230 |
print(tokenizer.decode(outputs[0]))
|
|
|
|
| 158 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl")
|
| 159 |
|
| 160 |
input_text = "translate English to German: How old are you?"
|
| 161 |
+
input_ids = tokenizer(input_text, return_tensors="pt").input_ids
|
| 162 |
|
| 163 |
outputs = model.generate(input_ids)
|
| 164 |
print(tokenizer.decode(outputs[0]))
|
|
|
|
| 179 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl", device_map="auto")
|
| 180 |
|
| 181 |
input_text = "translate English to German: How old are you?"
|
| 182 |
+
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
|
| 183 |
|
| 184 |
outputs = model.generate(input_ids)
|
| 185 |
print(tokenizer.decode(outputs[0]))
|
|
|
|
| 203 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl", device_map="auto", torch_dtype=torch.float16)
|
| 204 |
|
| 205 |
input_text = "translate English to German: How old are you?"
|
| 206 |
+
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
|
| 207 |
|
| 208 |
outputs = model.generate(input_ids)
|
| 209 |
print(tokenizer.decode(outputs[0]))
|
|
|
|
| 224 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl", device_map="auto", load_in_8bit=True)
|
| 225 |
|
| 226 |
input_text = "translate English to German: How old are you?"
|
| 227 |
+
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
|
| 228 |
|
| 229 |
outputs = model.generate(input_ids)
|
| 230 |
print(tokenizer.decode(outputs[0]))
|