nicholascao committed
Commit 335161d · 1 Parent(s): 847982e
Update README.md
README.md CHANGED
@@ -39,13 +39,14 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
 
 tokenizer = AutoTokenizer.from_pretrained('nicholascao/chatbloom-1b7-sft')
+tokenizer.pad_token_id = tokenizer.eos_token_id
+
 model = AutoModelForCausalLM.from_pretrained('nicholascao/chatbloom-1b7-sft').half()
-generation_config = GenerationConfig.from_pretrained('nicholascao/chatbloom-1b7-sft')
 
 inputs = tokenizer('<Human>: Hello <eoh> <Assistant>:', return_tensors='pt').to(torch.cuda.current_device())
 model.to(torch.cuda.current_device())
 
-output = model.generate(**inputs,
+output = model.generate(**inputs, max_length=768, do_sample=True, temperature=0.8, top_k=50, early_stopping=True, repetition_penalty=1.1)
 output = tokenizer.decode(output[0], skip_special_tokens=True)
 print(output)
 ```
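
For reference, below is a sketch of the full usage snippet as it reads after this commit, reconstructed from the diff above. The `import torch` line comes from the hunk context, and the comments are added here for explanation only; they are not part of the README. The commit drops the hub-loaded `GenerationConfig` in favor of sampling arguments passed directly to `generate()`, and sets `pad_token_id` to the EOS token id.

```python
import torch
# GenerationConfig is still imported here but no longer used after this commit.
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig

tokenizer = AutoTokenizer.from_pretrained('nicholascao/chatbloom-1b7-sft')
# The commit pins the pad token to the EOS token id for generation.
tokenizer.pad_token_id = tokenizer.eos_token_id

# Load the model in fp16 and move it to the current CUDA device.
model = AutoModelForCausalLM.from_pretrained('nicholascao/chatbloom-1b7-sft').half()

# Prompt format used by this SFT model: "<Human>: ... <eoh> <Assistant>:"
inputs = tokenizer('<Human>: Hello <eoh> <Assistant>:', return_tensors='pt').to(torch.cuda.current_device())
model.to(torch.cuda.current_device())

# Sampling parameters are now passed directly instead of via GenerationConfig.
output = model.generate(**inputs, max_length=768, do_sample=True, temperature=0.8,
                        top_k=50, early_stopping=True, repetition_penalty=1.1)
output = tokenizer.decode(output[0], skip_special_tokens=True)
print(output)
```

Passing the sampling parameters explicitly makes the generation setup visible in the README itself rather than relying on a separate generation config file on the Hub.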