Update README.md
README.md CHANGED
@@ -5,7 +5,7 @@ license: mit
 model_creator: haoranxu
 model_name: ALMA 13B Pretrain
 model_type: llama
-prompt_template: 'Translate this from Chinese to English
+prompt_template: 'Translate this from Chinese to English:
 
   Chinese: {prompt}
 
@@ -61,7 +61,7 @@ It is also now supported by continuous batching server [vLLM](https://github.com
 ## Prompt template: ALMA
 
 ```
-Translate this from Chinese to English
+Translate this from Chinese to English:
 Chinese: {prompt}
 English:
 
@@ -160,7 +160,7 @@ model = AutoAWQForCausalLM.from_quantized(model_name_or_path, fuse_layers=True,
 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=False)
 
 prompt = "Tell me about AI"
-prompt_template=f'''Translate this from Chinese to English
+prompt_template=f'''Translate this from Chinese to English:
 Chinese: {prompt}
 English:
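All three hunks make the same one-character fix: the instruction line of the ALMA prompt template gains a trailing colon ("Translate this from Chinese to English:") in the YAML front matter, the prompt-template section, and the Python usage example, presumably to match the template the model saw during training. For context, here is a minimal sketch of how the corrected template slots into the README's own AutoAWQ example; the repo id, the Chinese input sentence, and the generation settings are illustrative assumptions, not part of this commit.

```python
# Minimal sketch following the README's AutoAWQ example. The repo id, the
# input sentence, and the generation settings below are assumptions for
# illustration; only the template text comes from this commit.
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

model_name_or_path = "TheBloke/ALMA-13B-Pretrain-AWQ"  # assumed repo id

# Load the AWQ-quantized model and tokenizer, as in the README snippet.
model = AutoAWQForCausalLM.from_quantized(model_name_or_path, fuse_layers=True,
                                          trust_remote_code=False, safetensors=True)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=False)

prompt = "我喜欢机器翻译。"  # example Chinese input (assumed), "I like machine translation."

# The corrected ALMA template: note the trailing colon on the first line.
prompt_template = f'''Translate this from Chinese to English:
Chinese: {prompt}
English:'''

tokens = tokenizer(prompt_template, return_tensors="pt").input_ids.cuda()

# Generate the translation; sampling parameters are illustrative.
generation_output = model.generate(tokens, do_sample=True, temperature=0.7,
                                   top_p=0.95, max_new_tokens=128)
print(tokenizer.decode(generation_output[0], skip_special_tokens=True))
```

Because the model continues the text after "English:", keeping the template byte-for-byte identical across the front matter, the documented template, and the code example is the point of this commit.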