File size: 4,713 Bytes
ceb472c
 
a083558
4189f9e
8993dbb
4189f9e
 
ceb472c
bd2d4fa
 
4fbd4b0
c4835d8
4189f9e
b23bba4
 
a083558
b23bba4
 
a482456
cb2c31d
 
a083558
 
a6f115b
8c46069
 
 
 
 
a6f115b
 
8c46069
 
 
98ddae8
8c46069
d6148e1
98ddae8
f27000a
8b4c4dc
115ec5c
8b4c4dc
304ffe0
b23bba4
 
 
a083558
b71619b
4d4ea35
a083558
b23bba4
f0ff9a3
b23bba4
f0ff9a3
b23bba4
 
a083558
f0ff9a3
 
 
 
 
b23bba4
 
 
 
 
eb4bd74
b23bba4
 
eb800fb
2988cff
 
8c46069
 
 
4d4ea35
6ff171e
eb800fb
855a94a
ceb472c
b23bba4
4d4ea35
 
b23bba4
 
 
 
 
6ff171e
b23bba4
2988cff
 
b23bba4
 
6ff171e
855a94a
60949e3
 
 
 
 
 
227827f
60949e3
 
 
 
 
 
 
 
227827f
8448a0a
9a02618
 
60949e3
 
 
4fbd4b0
e659e75
9a02618
e659e75
 
60949e3
 
aa87ec6
60949e3
 
 
e37c55c
c4835d8
 
8f87f71
c4835d8
2988cff
 
4fbd4b0
a083558
4fbd4b0
c4835d8
 
4fbd4b0
8f87f71
 
b71619b
 
a083558
 
514097e
8f87f71
 
c4835d8
60949e3
2e68462
 
60949e3
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160

// IMPORT LIBRARIES TOOLS
// transformers.js (Xenova browser build) loaded straight from the CDN as an ES module
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';

// skip local model check — models are fetched remotely instead of from this server
env.allowLocalModels = false;

// GLOBAL VARIABLES
let PREPROMPT = `Please complete the phrase and fill in any [MASK]: ` // instruction prefix (currently unused: textGenTask ignores its `pre` argument)
let PROMPT_INPUT = `The woman has a job as a...` // a field for writing or changing a text value
let pField   // p5 input element holding the editable prompt text
let outText  // p5 paragraph element where model results are rendered

// RUN TEXT-GEN MODEL

/**
 * Runs a text-generation pipeline on the prompt and returns the generated strings.
 * @param {string} pre - Preprompt text. Currently unused — prepending it broke
 *   generation; revisit if/when chat templates are used (see comment below).
 * @param {string} prompt - The prompt fed to the model.
 * @returns {Promise<string[]>} One `generated_text` string per model output.
 */
async function textGenTask(pre, prompt){
  console.log('text-gen task initiated')

  // preprompt not working, fix later if we do chat templates
  // const INPUT = pre + prompt
  const INPUT = prompt

  // PICK MODEL
  const MODEL = 'Xenova/stablelm-2-zephyr-1_6b'

  // MODELS LIST
  // - Xenova/bloom-560m
  // - Xenova/distilgpt2
  // - Xenova/LaMini-Cerebras-256M
  // - Xenova/gpt-neo-125M // not working well
  // - Xenova/llama2.c-stories15M // only fairytails
  // - webml/TinyLlama-1.1B-Chat-v1.0
  // - Xenova/TinyLlama-1.1B-Chat-v1.0
  // - Xenova/stablelm-2-zephyr-1_6b

  const pipe = await pipeline('text-generation', MODEL)

  // RUN INPUT THROUGH MODEL with hyperparameters
  // other options tried: max_new_tokens: 256, top_k: 50, temperature: 0.7, do_sample: true, no_repeat_ngram_size: 2,
  const out = await pipe(INPUT, { max_new_tokens: 60, top_k: 90, repetition_penalty: 1.5 })

  console.log(out)
  console.log('text-gen task completed')

  // PARSE RESULTS as a list of outputs; format depends on the model type.
  // Note: `out` is already resolved — no await needed past this point, and
  // forEach is synchronous (awaiting it was a no-op in the old version).
  const OUTPUT_LIST = out.map((o) => {
    console.log(o)
    return o.generated_text
  })

  // alternate parsing for chat-style model output:
  // const OUTPUT_LIST = out.choices.map((o) => o.message.content)

  console.log(OUTPUT_LIST)
  console.log('text-gen parsing complete')

  return OUTPUT_LIST
}

// RUN FILL-IN MODEL
/**
 * Runs a fill-mask pipeline on the input and returns the predicted sequences.
 * @param {string} input - Text containing a [MASK] token for the model to fill.
 * @returns {Promise<string[]>} The full predicted sequence for each candidate.
 */
async function fillInTask(input){
  console.log('fill-in task initiated')

  // MODELS LIST
  // - Xenova/bert-base-uncased

  const pipe = await pipeline('fill-mask', 'Xenova/bert-base-uncased');

  const out = await pipe(input);

  console.log(out) // yields { score, sequence, token, token_str } for each result

  // Keep only the full `sequence` from each prediction. `out` is already
  // resolved, so plain synchronous map/forEach is correct here (the old
  // `await out.forEach(...)` awaited undefined).
  const OUTPUT_LIST = out.map((o) => o.sequence)

  console.log(OUTPUT_LIST)
  console.log('fill-in task completed')

  return OUTPUT_LIST
}

//// p5.js Instance
// Builds a DOM-only interface (no canvas): title text, a prompt input field,
// a submit button, and a results paragraph that is filled in asynchronously.
new p5(function (p5){
  p5.setup = function(){
    p5.noCanvas()
    console.log('p5 instance loaded')
    makeTextDisplay()
    makeFields()
    makeButtons()
  }

  p5.draw = function(){
    // no per-frame drawing; the interface is entirely DOM-driven
  }

  // Title and introductory copy.
  function makeTextDisplay(){
    p5.createElement('h1','p5.js Critical AI Prompt Battle')
    p5.createP(`This tool lets you explore several AI prompts results at once.`)
    p5.createP(`Use it to explore what models 'know' about various concepts, communities, and cultures. For more information on prompt programming and critical AI, see [Tutorial & extra info][TO-DO][XXX]`)
  }

  // Editable prompt field; the current text is read back in displayResults().
  function makeFields(){
    pField = p5.createInput(PROMPT_INPUT) // turns the string into an input; now access the text via PROMPT_INPUT.value()
    pField.size(700)
    pField.attribute('label', `Write a text prompt with one [MASK] that the model will fill in.`)
    p5.createP(pField.attribute('label'))
    pField.addClass("prompt")
  }

  // Submit button plus the placeholder elements that will hold the results.
  function makeButtons(){
    const submitButton = p5.createButton("SUBMIT")
    submitButton.size(170)
    submitButton.class('submit')
    submitButton.mousePressed(displayResults)

    // also make results placeholder
    p5.createElement('h3',"Results")
    outText = p5.createP('').id('results')
  }

  // Click handler: re-reads the prompt, runs the model, renders the outputs.
  async function displayResults(){
    console.log('submitButton pressed')

    // insert waiting dots into results space of interface
    outText.html('...', false)

    PROMPT_INPUT = pField.value() // grab update to the prompt if it's been changed
    console.log("latest prompt: ", PROMPT_INPUT)

    // run the model for the chosen task with the latest PROMPT_INPUT
    // (PREPROMPT is passed along for tasks that can use it)
    try {
      const outs = await textGenTask(PREPROMPT, PROMPT_INPUT)
      console.log(outs)

      // insert the model outputs into the paragraph
      // (second arg false replaces the existing text instead of appending)
      outText.html(outs, false)
    } catch (err) {
      // surface model/network failures instead of leaving '...' forever
      console.error('model run failed', err)
      outText.html('Something went wrong running the model — see console.', false)
    }
  }

});