Bo1015 committed on
Commit
b15f910
·
verified ·
1 Parent(s): 1d9eb82

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -1
README.md CHANGED
@@ -25,7 +25,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
25
  import torch
26
 
27
  tokenizer = AutoTokenizer.from_pretrained("biomap-research/xtrimopglm-7b-clm", trust_remote_code=True, use_fast=True)
28
- model = AutoModelForCausalLM.from_config("biomap-research/xtrimopglm-7b-clm", trust_remote_code=True, torch_dtype=torch.bfloat16)
29
  if torch.cuda.is_available():
30
  model = model.cuda()
31
  model.eval()
@@ -37,6 +37,7 @@ for idx, each in enumerate(prompt):
37
  print(f"Begin generating idx: {idx} with prompt {each}")
38
  output = model.chat(tokenizer, each, **gen_kwargs)
39
  print(f"\nEnd generation with length: {len(output.split())} - seqs: {output}\n")
 
40
  ```
41
 
42
 
 
25
  import torch
26
 
27
  tokenizer = AutoTokenizer.from_pretrained("biomap-research/xtrimopglm-7b-clm", trust_remote_code=True, use_fast=True)
28
+ model = AutoModelForCausalLM.from_pretrained("biomap-research/xtrimopglm-7b-clm", trust_remote_code=True, torch_dtype=torch.bfloat16)
29
  if torch.cuda.is_available():
30
  model = model.cuda()
31
  model.eval()
 
37
  print(f"Begin generating idx: {idx} with prompt {each}")
38
  output = model.chat(tokenizer, each, **gen_kwargs)
39
  print(f"\nEnd generation with length: {len(output.split())} - seqs: {output}\n")
40
+
41
  ```
42
 
43