Sarah Ciston committed on
Commit
a5c7c0a
·
1 Parent(s): ed7071e

try with new pipe instructions

Browse files
Files changed (1) hide show
  1. sketch.js +21 -21
sketch.js CHANGED
@@ -160,24 +160,14 @@ async function runModel(PREPROMPT, PROMPT){
160
 
161
  // let inputText = PREPROMPT + PROMPT
162
 
163
- // for zephyr customizing
164
- let inputs = [
165
- {
166
- "role": "system",
167
- "content": PREPROMPT
168
- },{
169
- "role": "user",
170
- "content": PROMPT,
171
- "max_new_tokens": 256,
172
- "do_sample": false
173
- }
174
- ]
175
 
176
 
177
  // pipeline/transformers version
178
  // let pipe = await pipeline('text-generation', 'Xenova/distilgpt2');
179
  // seems to work with default model distilgpt2 ugh
180
- let pipe = pipeline('text-generation', "HuggingFaceH4/zephyr-7b-beta")
 
181
 
182
  // IMPORTANT: different models have different input/output structures for their API so look to the samples and references on the specific model page for help :)
183
 
@@ -193,19 +183,29 @@ async function runModel(PREPROMPT, PROMPT){
193
  // num_return_sequences: 1 //must be 1 for greedy search
194
  // })
195
 
 
 
 
 
 
 
 
 
 
 
 
196
  // DID NOT WORK for zephyr customizing
197
- // let prompt = pipe.tokenizer.apply_chat_template(inputs, {
198
  // tokenize: false,
199
  // add_generation_prompt: true
200
  // })
201
 
202
- let out = await pipe(inputs, {
203
- // max_new_tokens: 256,
204
- // do_sample: true,
205
- "return_full_text": false,
206
- temperature: 0.7,
207
- top_k: 50,
208
- top_p: 0.95
209
  })
210
 
211
 
 
160
 
161
  // let inputText = PREPROMPT + PROMPT
162
 
163
+
 
 
 
 
 
 
 
 
 
 
 
164
 
165
 
166
  // pipeline/transformers version
167
  // let pipe = await pipeline('text-generation', 'Xenova/distilgpt2');
168
  // seems to work with default model distilgpt2 ugh
169
+
170
+ let pipe = pipeline("text-generation", "HuggingFaceH4/zephyr-7b-beta")
171
 
172
  // IMPORTANT: different models have different input/output structures for their API so look to the samples and references on the specific model page for help :)
173
 
 
183
  // num_return_sequences: 1 //must be 1 for greedy search
184
  // })
185
 
186
+ // for zephyr customizing
187
+ let MESSAGES = [
188
+ {
189
+ "role": "system",
190
+ "content": PREPROMPT
191
+ },{
192
+ "role": "user",
193
+ "content": PROMPT
194
+ }
195
+ ]
196
+
197
  // DID NOT WORK for zephyr customizing
198
+ // let prompt = pipe(tokenizer.apply_chat_template(MESSAGES, {
199
  // tokenize: false,
200
  // add_generation_prompt: true
201
  // })
202
 
203
+ let out = await pipe(MESSAGES, {
204
+ max_new_tokens: 150,
205
+ return_full_text: false
206
+ // temperature: 0.7,
207
+ // top_k: 50,
208
+ // top_p: 0.95
 
209
  })
210
 
211