Sarah Ciston committed on
Commit f086ae3 · 1 Parent(s): 023d9ac

try a different pipe type

Files changed (1): sketch.js +6 -4
sketch.js CHANGED
@@ -168,7 +168,8 @@ async function runModel(PREPROMPT, PROMPT){
   },{
     "role": "user",
     "content": PROMPT,
-    "max_tokens": 250
+    "max_new_tokens": 256,
+    "do_sample": false
   }
 ]

@@ -176,7 +177,7 @@ async function runModel(PREPROMPT, PROMPT){
 // pipeline/transformers version
 // let pipe = await pipeline('text-generation', 'Xenova/distilgpt2');
 // seems to work with default model distilgpt2 ugh
-let pipe = pipeline('text-generation', "HuggingFaceH4/zephyr-7b-beta")
+let pipe = pipeline('chat-completion', "HuggingFaceH4/zephyr-7b-beta")

 // IMPORTANT: different models have different input/output structures for their API so look to the samples and references on the specific model page for help :)

@@ -199,8 +200,9 @@ async function runModel(PREPROMPT, PROMPT){
 })

 let out = await pipe(prompt, {
-  max_new_tokens: 256,
-  do_sample: true,
+  // max_new_tokens: 256,
+  // do_sample: true,
+  "return_full_text": false,
   temperature: 0.7,
   top_k: 50,
   top_p: 0.95
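
Reviewer note: two things in this diff look likely to break at runtime. In Transformers.js, pipeline() returns a Promise, so the un-awaited `let pipe = pipeline(...)` will not be callable, and 'chat-completion' does not appear to be a registered pipeline task there (chat-style generation still goes through 'text-generation'). Generation options such as max_new_tokens also belong in pipe()'s options argument, not inside the message objects. Below is a minimal sketch of the working pattern, assuming the @xenova/transformers package and the Xenova/distilgpt2 model already mentioned in the comments; the option values are illustrative, and return_full_text is assumed to behave as in the Python pipeline.

// Minimal sketch, assuming @xenova/transformers; model and options illustrative.
import { pipeline } from '@xenova/transformers';

async function runModel(PREPROMPT, PROMPT) {
  // pipeline() returns a Promise, so await it before calling the pipe;
  // chat-style generation still uses the 'text-generation' task.
  const pipe = await pipeline('text-generation', 'Xenova/distilgpt2');

  // distilgpt2 has no chat template, so join the prompts into one string.
  const prompt = `${PREPROMPT}\n${PROMPT}`;

  // Generation options go in the second argument to pipe(), not the messages.
  const out = await pipe(prompt, {
    max_new_tokens: 256,
    do_sample: true,
    temperature: 0.7,
    top_k: 50,
    top_p: 0.95,
    return_full_text: false, // assumed: return only newly generated text
  });

  return out[0].generated_text;
}

With this shape, swapping in a chat-tuned model like HuggingFaceH4/zephyr-7b-beta would mean passing an array of { role, content } messages instead of a plain string, per that model's page.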