Spaces:
Running
Running
sarahciston
committed on
Commit
•
a482456
1
Parent(s):
cb2c31d
try new Xenova/llama2.c-stories15M model for storytelling
Browse files
sketch.js
CHANGED
@@ -16,12 +16,12 @@ let outText
|
|
16 |
async function textGenTask(pre, prompt){
|
17 |
console.log('text-gen task initiated')
|
18 |
|
19 |
-
// preprompt not working, fix later
|
20 |
// let INPUT = pre + prompt
|
21 |
let INPUT = prompt
|
22 |
|
23 |
// PICK MODEL
|
24 |
-
let MODEL = 'Xenova/
|
25 |
// const = modelsList = ['Xenova/LaMini-Cerebras-256M', 'Xenova/TinyLlama-1.1B-Chat-v1.0', 'Xenova/distilgpt2', 'Xenova/bloom-560m']
|
26 |
|
27 |
const pipe = await pipeline('text-generation', MODEL)
|
@@ -82,23 +82,6 @@ async function fillInTask(input){
|
|
82 |
return await OUTPUT_LIST
|
83 |
}
|
84 |
|
85 |
-
// // PROCESS MODEL OUTPUT
|
86 |
-
// // a generic function to pass in different model task functions
|
87 |
-
// async function getOutputs(task){
|
88 |
-
// let output = await task
|
89 |
-
|
90 |
-
// await output.forEach(o => {
|
91 |
-
// OUTPUT_LIST.push(o.sequence) // put only the full sequence in a list
|
92 |
-
// })
|
93 |
-
|
94 |
-
// console.log(OUTPUT_LIST)
|
95 |
-
|
96 |
-
// return await OUTPUT_LIST
|
97 |
-
// }
|
98 |
-
|
99 |
-
// await getOutputs(fillInTask()) // getOutputs will later connect to the interface to display results
|
100 |
-
|
101 |
-
|
102 |
//// p5.js Instance
|
103 |
|
104 |
new p5(function (p5){
|
@@ -126,8 +109,6 @@ new p5(function (p5){
|
|
126 |
pField.attribute('label', `Write a text prompt with one [MASK] that the model will fill in.`)
|
127 |
p5.createP(pField.attribute('label'))
|
128 |
pField.addClass("prompt")
|
129 |
-
// pField.value(PROMPT_INPUT)
|
130 |
-
// console.log(pField.value())
|
131 |
}
|
132 |
|
133 |
function makeButtons(){
|
|
|
16 |
async function textGenTask(pre, prompt){
|
17 |
console.log('text-gen task initiated')
|
18 |
|
19 |
+
// preprompt not working, fix later if we do chat templates
|
20 |
// let INPUT = pre + prompt
|
21 |
let INPUT = prompt
|
22 |
|
23 |
// PICK MODEL
|
24 |
+
let MODEL = 'Xenova/llama2.c-stories15M'
|
25 |
// const = modelsList = ['Xenova/LaMini-Cerebras-256M', 'Xenova/TinyLlama-1.1B-Chat-v1.0', 'Xenova/distilgpt2', 'Xenova/bloom-560m']
|
26 |
|
27 |
const pipe = await pipeline('text-generation', MODEL)
|
|
|
82 |
return await OUTPUT_LIST
|
83 |
}
|
84 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
85 |
//// p5.js Instance
|
86 |
|
87 |
new p5(function (p5){
|
|
|
109 |
pField.attribute('label', `Write a text prompt with one [MASK] that the model will fill in.`)
|
110 |
p5.createP(pField.attribute('label'))
|
111 |
pField.addClass("prompt")
|
|
|
|
|
112 |
}
|
113 |
|
114 |
function makeButtons(){
|