Sarah Ciston committed on
Commit
e70fc28
·
1 Parent(s): ee201d1

try new zephyr model with lots of params

Browse files
Files changed (1) hide show
  1. sketch.js +29 -7
sketch.js CHANGED
@@ -158,14 +158,27 @@ new p5(function (p5) {
158
  async function runModel(PREPROMPT, PROMPT){
159
  // // Chat completion API
160
 
161
- let inputText = PREPROMPT + PROMPT
 
 
 
 
 
 
 
 
 
 
 
 
 
162
 
163
  // pipeline/transformers version
164
  // let generator = await pipeline('text-generation', 'Xenova/distilgpt2');
165
  // seems to work with default model distilgpt2 ugh
166
- let generator = pipeline(model="HuggingFaceH4/zephyr-7b-beta")
167
 
168
- // IMPORTANT: different models have different input/output structures for their API so look to the samples and references
169
 
170
  // 'meta-llama/Meta-Llama-3-70B-Instruct'
171
  // 'openai-community/gpt2'
@@ -179,11 +192,20 @@ async function runModel(PREPROMPT, PROMPT){
179
  // num_return_sequences: 1 //must be 1 for greedy search
180
  // })
181
 
182
- let out = await generator({
183
- "role": "user",
184
- "content": inputText,
185
- "max_tokens": 250
186
  })
 
 
 
 
 
 
 
 
 
187
 
188
  console.log(out)
189
 
 
158
  async function runModel(PREPROMPT, PROMPT){
159
  // // Chat completion API
160
 
161
+ // let inputText = PREPROMPT + PROMPT
162
+
163
+ // for zephyr customizing
164
+ let inputs = [
165
+ {
166
+ "role": "system",
167
+ "content": PREPROMPT
168
+ },{
169
+ "role": "user",
170
+ "content": PROMPT,
171
+ "max_tokens": 250
172
+ }
173
+ ]
174
+
175
 
176
  // pipeline/transformers version
177
  // let generator = await pipeline('text-generation', 'Xenova/distilgpt2');
178
  // seems to work with default model distilgpt2 ugh
179
+ let generator = pipeline('text-generation', "HuggingFaceH4/zephyr-7b-beta")
180
 
181
+ // IMPORTANT: different models have different input/output structures for their API so look to the samples and references on the specific model page for help :)
182
 
183
  // 'meta-llama/Meta-Llama-3-70B-Instruct'
184
  // 'openai-community/gpt2'
 
192
  // num_return_sequences: 1 //must be 1 for greedy search
193
  // })
194
 
195
+ // for zephyr customizing
196
+ let prompt = pipe.tokenizer.apply_chat_template(inputs, {
197
+ tokenize: false,
198
+ add_generation_prompt: true
199
  })
200
+
201
+ let out = await generator(prompt, {
202
+ max_new_tokens: 256,
203
+ do_sample: true,
204
+ temperature: 0.7,
205
+ top_k: 50,
206
+ top_p: 0.95
207
+ })
208
+
209
 
210
  console.log(out)
211