Update README.md
README.md CHANGED

@@ -1,12 +1,3 @@
----
-tag: text-generation
-widget:
-- text: "My name is Julien and I like to"
-  example_title: "Julien"
-- text: "My name is Merve and my favorite"
-  example_title: "Merve"
----
-
 Best Generations with
 ```
 from transformers import OPTForCausalLM

@@ -20,8 +11,6 @@ inputs = tokenizer("Covid-19 is Positive, 42.237% of Lungs show GGO, Lower Left
 generate_ids = model.generate(inputs.input_ids,
                               do_sample=True,
                               max_new_tokens=1000,
-                              top_k=50,
-                              top_p=0.95,
                               )
 completion = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
 print(completion)