DanielWang committed on
Commit a1ee998 · 1 Parent(s): c2549d7

Update README.md


update inference case

Files changed (1): README.md +4 -4
README.md CHANGED
@@ -35,9 +35,9 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/baichuan-7B", trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained("baichuan-inc/baichuan-7B", device_map="auto", trust_remote_code=True)
-inputs = tokenizer('登鹳雀楼->王之涣\n夜雨寄北->\n', return_tensors='pt')
+inputs = tokenizer('登鹳雀楼->王之涣\n夜雨寄北->', return_tensors='pt')
 inputs = inputs.to('cuda:0')
-pred = model.generate(**inputs, max_new_tokens=512, do_sample=True)
+pred = model.generate(**inputs, max_new_tokens=64)
 print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
 ```
 
@@ -47,9 +47,9 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/baichuan-7B", trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained("baichuan-inc/baichuan-7B", device_map="auto", trust_remote_code=True)
-inputs = tokenizer('Hamlet->Shakespeare\nOne Hundred Years of Solitude->\n', return_tensors='pt')
+inputs = tokenizer('Hamlet->Shakespeare\nOne Hundred Years of Solitude->', return_tensors='pt')
 inputs = inputs.to('cuda:0')
-pred = model.generate(**inputs, max_new_tokens=512, do_sample=True)
+pred = model.generate(**inputs, max_new_tokens=64)
 print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
 ```
 
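For reference, here is the updated example from this commit assembled into one runnable snippet — a minimal sketch, assuming a CUDA-capable GPU and access to the baichuan-inc/baichuan-7B checkpoint on the Hub. The commit swaps sampling of up to 512 tokens for greedy decoding of at most 64 tokens, and drops the trailing newline from the few-shot prompt so the model completes the author name directly.

```python
# Minimal runnable version of the updated inference example (assumption: a CUDA
# GPU is available and the checkpoint is downloadable from the Hugging Face Hub).
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/baichuan-7B", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("baichuan-inc/baichuan-7B", device_map="auto", trust_remote_code=True)

# Few-shot completion prompt in "work->author" form; the prompt ends right
# where the model should fill in the author of the second work.
inputs = tokenizer('Hamlet->Shakespeare\nOne Hundred Years of Solitude->', return_tensors='pt')
inputs = inputs.to('cuda:0')

# Greedy decoding with a small token budget, as in the updated README.
pred = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
```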