sarahciston committed on
Commit
f0ff9a3
1 Parent(s): 76ed4e2

try parsing messages again

Browse files
Files changed (1) hide show
  1. sketch.js +13 -13
sketch.js CHANGED
@@ -19,16 +19,16 @@ async function textGenTask(input){
19
 
20
  // const = modelsList = ['Xenova/LaMini-Cerebras-256M', ]
21
 
22
- // let messages = [
23
- // {"role": "system", "content": PREPROMPT},
24
- // {"role": "user", "content": input}
25
- // ]
26
 
27
  // const prompt = pipe.tokenizer.apply_chat_template(messages, {
28
- // tokenize: false, add_generation_prompt: true,
29
  // });
30
 
31
- var out = await pipe(input, {
32
  max_new_tokens: 256,
33
  temperature: 0.7,
34
  do_sample: true,
@@ -41,16 +41,16 @@ async function textGenTask(input){
41
  let OUTPUT_LIST = [] // a blank array to store the results from the model
42
 
43
  // parsing of output
44
- // await out.forEach(o => {
45
- // console.log(o)
46
- // OUTPUT_LIST.push(o.generated_text)
47
- // })
48
-
49
- await out.choices.forEach(o => {
50
  console.log(o)
51
- OUTPUT_LIST.push(o.message.content)
52
  })
53
 
 
 
 
 
 
54
  console.log(OUTPUT_LIST)
55
  console.log('text-gen parsing complete')
56
 
 
19
 
20
  // const = modelsList = ['Xenova/LaMini-Cerebras-256M', ]
21
 
22
+ let messages = [
23
+ {"role": "system", "content": PREPROMPT},
24
+ {"role": "user", "content": input}
25
+ ]
26
 
27
  // const prompt = pipe.tokenizer.apply_chat_template(messages, {
28
+ // tokenize: false, add_generation_prompt: false,
29
  // });
30
 
31
+ var out = await pipe(messages, {
32
  max_new_tokens: 256,
33
  temperature: 0.7,
34
  do_sample: true,
 
41
  let OUTPUT_LIST = [] // a blank array to store the results from the model
42
 
43
  // parsing of output
44
+ await out.forEach(o => {
 
 
 
 
 
45
  console.log(o)
46
+ OUTPUT_LIST.push(o.generated_text)
47
  })
48
 
49
+ // await out.choices.forEach(o => {
50
+ // console.log(o)
51
+ // OUTPUT_LIST.push(o.message.content)
52
+ // })
53
+
54
  console.log(OUTPUT_LIST)
55
  console.log('text-gen parsing complete')
56