Update README.md
Browse files
README.md
CHANGED
```diff
@@ -70,8 +70,8 @@ We can perform inference for the Skywork-MoE-base (16x13B size) model using Hugg

 from transformers import AutoModelForCausalLM, AutoTokenizer

-model = AutoModelForCausalLM.from_pretrained("Skywork/Skywork-MoE-
-tokenizer = AutoTokenizer.from_pretrained("Skywork/Skywork-MoE-
+model = AutoModelForCausalLM.from_pretrained("Skywork/Skywork-MoE-Base", trust_remote_code=True, device_map='auto')
+tokenizer = AutoTokenizer.from_pretrained("Skywork/Skywork-MoE-Base", trust_remote_code=True)

 inputs = tokenizer('陕西的省会是西安', return_tensors='pt').to(model.device)
 response = model.generate(inputs.input_ids, max_length=128)
```