sarahciston committed on
Commit
115ec5c
1 Parent(s): d8376d0

distilgpt2 model

Browse files
Files changed (1) hide show
  1. sketch.js +6 -13
sketch.js CHANGED
@@ -20,22 +20,15 @@ async function textGenTask(pre, prompt){
20
  let INPUT = prompt
21
 
22
  // PICK MODEL
23
- let MODEL = 'Xenova/bloom-560m'
24
  // const = modelsList = ['Xenova/LaMini-Cerebras-256M', 'Xenova/TinyLlama-1.1B-Chat-v1.0', 'Xenova/distilgpt2', 'Xenova/bloom-560m']
25
 
26
  const pipe = await pipeline('text-generation', MODEL)
27
 
28
  // RUN INPUT THROUGH MODEL,
29
- var out = await pipe(INPUT)
30
-
31
- /* //setting hyperparameters
32
- , {
33
- max_new_tokens: 256,
34
- top_k: 50,
35
- temperature: 0.7,
36
- do_sample: true,
37
- })
38
- */
39
 
40
  console.log(await out)
41
  console.log('text-gen task completed')
@@ -158,8 +151,8 @@ new p5(function (p5){
158
  let outs = await textGenTask(PREPROMPT, PROMPT_INPUT)
159
  console.log(outs)
160
 
161
- let outText = p5.createP('')
162
- await outText.html(outs) // true appends text instead of replaces
163
  }
164
 
165
  });
 
20
  let INPUT = prompt
21
 
22
  // PICK MODEL
23
+ let MODEL = 'Xenova/distilgpt2'
24
  // const = modelsList = ['Xenova/LaMini-Cerebras-256M', 'Xenova/TinyLlama-1.1B-Chat-v1.0', 'Xenova/distilgpt2', 'Xenova/bloom-560m']
25
 
26
  const pipe = await pipeline('text-generation', MODEL)
27
 
28
  // RUN INPUT THROUGH MODEL,
29
+ var out = await pipe(INPUT, { top_k: 30, max_new_tokens: 100 })
30
+ // setting hyperparameters
31
+ // max_new_tokens: 256, top_k: 50, temperature: 0.7, do_sample: true,
 
 
 
 
 
 
 
32
 
33
  console.log(await out)
34
  console.log('text-gen task completed')
 
151
  let outs = await textGenTask(PREPROMPT, PROMPT_INPUT)
152
  console.log(outs)
153
 
154
+ let outText = p5.createP('...')
155
+ await outText.html(outs, 'false') // true appends text instead of replaces
156
  }
157
 
158
  });