Sarah Ciston committed on
Commit
ee201d1
·
1 Parent(s): 50b0541

try HF model w sample params

Browse files
Files changed (2) hide show
  1. README.md +2 -1
  2. sketch.js +17 -9
README.md CHANGED
@@ -6,12 +6,13 @@ colorTo: blue
6
  sdk: static
7
  pinned: false
8
  models:
 
9
  # - openai-community/gpt2
10
  # - meta-llama/Meta-Llama-3-70B-Instruct
11
  # - Xenova/detr-resnet-50
12
  # - Xenova/gpt2
13
  # - Xenova/bloom-560m
14
- - Xenova/distilgpt2
15
  # - Xenova/gpt-3.5-turbo
16
  # - Xenova/llama-68m
17
  # - Xenova/LaMini-Flan-T5-783M
 
6
  sdk: static
7
  pinned: false
8
  models:
9
+ - HuggingFaceH4/zephyr-7b-beta
10
  # - openai-community/gpt2
11
  # - meta-llama/Meta-Llama-3-70B-Instruct
12
  # - Xenova/detr-resnet-50
13
  # - Xenova/gpt2
14
  # - Xenova/bloom-560m
15
+ # - Xenova/distilgpt2
16
  # - Xenova/gpt-3.5-turbo
17
  # - Xenova/llama-68m
18
  # - Xenova/LaMini-Flan-T5-783M
sketch.js CHANGED
@@ -161,26 +161,34 @@ async function runModel(PREPROMPT, PROMPT){
161
  let inputText = PREPROMPT + PROMPT
162
 
163
  // pipeline/transformers version
164
- let pipe = await pipeline('text-generation');
165
  // seems to work with default model distilgpt2 ugh
 
 
 
166
 
167
  // 'meta-llama/Meta-Llama-3-70B-Instruct'
168
  // 'openai-community/gpt2'
169
  // 'Xenova/gpt-3.5-turbo'
170
  // , 'Xenova/distilgpt2'
171
 
172
- let out = await pipe(inputText, {
173
- max_tokens: 250
174
- // return_full_text: false
175
- // repetition_penalty: 1.5,
176
- // num_return_sequences: 1 //must be 1 for greedy search
177
- })
178
 
179
- // let out = await pipe(inputText)
 
 
 
 
180
 
181
  console.log(out)
182
 
183
- var modelResult = await out[0].generated_text
 
184
  console.log(modelResult)
185
 
186
  return modelResult
 
161
  let inputText = PREPROMPT + PROMPT
162
 
163
  // pipeline/transformers version
164
+ // let generator = await pipeline('text-generation', 'Xenova/distilgpt2');
165
  // seems to work with default model distilgpt2 ugh
166
+ let generator = pipeline(model="HuggingFaceH4/zephyr-7b-beta")
167
+
168
+ // IMPORTANT: different models have different input/output structures for their API so look to the samples and references
169
 
170
  // 'meta-llama/Meta-Llama-3-70B-Instruct'
171
  // 'openai-community/gpt2'
172
  // 'Xenova/gpt-3.5-turbo'
173
  // , 'Xenova/distilgpt2'
174
 
175
+ // let out = await generator(inputText, {
176
+ // max_tokens: 250,
177
+ // return_full_text: false
178
+ // repetition_penalty: 1.5,
179
+ // num_return_sequences: 1 //must be 1 for greedy search
180
+ // })
181
 
182
+ let out = await generator({
183
+ "role": "user",
184
+ "content": inputText,
185
+ "max_tokens": 250
186
+ })
187
 
188
  console.log(out)
189
 
190
+ // var modelResult = await out[0].generated_text
191
+ var modelResult = await out[0].generated_text[0].content
192
  console.log(modelResult)
193
 
194
  return modelResult