Sarah Ciston
committed on
Commit
·
dcfef54
1
Parent(s):
840cbc5
reconnect model
Browse files
sketch.js
CHANGED
@@ -129,6 +129,12 @@ new p5(function(p5){
|
|
129 |
// Map the list of blanks text values to a new list
|
130 |
let inputValues = blankArray.map(i => i.value())
|
131 |
console.log(inputValues)
|
|
|
|
|
|
|
|
|
|
|
|
|
132 |
}
|
133 |
|
134 |
// var modelResult = submitButton.mousePressed(runModel) = function(){
|
@@ -154,19 +160,20 @@ new p5(function(p5){
|
|
154 |
// /// this needs to run on button click, use string variables to blank in the form
|
155 |
// var PROMPT = promptInput.value()
|
156 |
|
|
|
157 |
// var blankArray = ["mother", "father", "sister", "brother"]
|
158 |
// // for num of blanks put in list
|
159 |
// var blankArray = [`${blankAResult}`, `${blankBResult}`, `${blankCResult}`]
|
160 |
|
161 |
|
162 |
-
|
163 |
-
//
|
164 |
-
|
165 |
-
|
166 |
-
//
|
167 |
-
|
168 |
-
|
169 |
-
|
170 |
|
171 |
// // let out = await pipe(PREPROMPT + PROMPT)
|
172 |
// // let out = await pipe(PREPROMPT + PROMPT, {
|
|
|
129 |
// Map the list of blanks text values to a new list
|
130 |
let inputValues = blankArray.map(i => i.value())
|
131 |
console.log(inputValues)
|
132 |
+
|
133 |
+
// Do model stuff in this function instead of in general
|
134 |
+
PROMPT = promptInput.value() // updated check of the prompt field
|
135 |
+
|
136 |
+
await runModel()
|
137 |
+
// BLANKS = inputValues // get ready to feed array list into model
|
138 |
}
|
139 |
|
140 |
// var modelResult = submitButton.mousePressed(runModel) = function(){
|
|
|
160 |
// /// this needs to run on button click, use string variables to blank in the form
|
161 |
// var PROMPT = promptInput.value()
|
162 |
|
163 |
+
|
164 |
// var blankArray = ["mother", "father", "sister", "brother"]
|
165 |
// // for num of blanks put in list
|
166 |
// var blankArray = [`${blankAResult}`, `${blankBResult}`, `${blankCResult}`]
|
167 |
|
168 |
|
169 |
+
async function runModel(prompt, blanks){
|
170 |
+
// Chat completion API
|
171 |
+
const out = await inference.chatCompletion({
|
172 |
+
model: MODELNAME,
|
173 |
+
// model: "google/gemma-2-9b",
|
174 |
+
messages: [{ role: "user", content: PREPROMPT + PROMPT }],
|
175 |
+
max_tokens: 100
|
176 |
+
});
|
177 |
|
178 |
// // let out = await pipe(PREPROMPT + PROMPT)
|
179 |
// // let out = await pipe(PREPROMPT + PROMPT, {
|