Sarah Ciston
committed on
Commit · f8c97c4 · 1 Parent(s): a5c7c0a
try pipe again
sketch.js CHANGED
@@ -158,16 +158,10 @@ new p5(function (p5) {
   async function runModel(PREPROMPT, PROMPT){
     // // Chat completion API
 
-    // let inputText = PREPROMPT + PROMPT
-
-
-
-
     // pipeline/transformers version
     // let pipe = await pipeline('text-generation', 'Xenova/distilgpt2');
     // seems to work with default model distilgpt2 ugh
 
-    let pipe = pipeline("text-generation", "HuggingFaceH4/zephyr-7b-beta")
 
     // IMPORTANT: different models have different input/output structures for their API so look to the samples and references on the specific model page for help :)
 
@@ -176,13 +170,15 @@ async function runModel(PREPROMPT, PROMPT){
     // 'Xenova/gpt-3.5-turbo'
     // , 'Xenova/distilgpt2'
 
-    // let
+    // let res = await pipe(inputText, {
     // max_tokens: 250,
     // return_full_text: false
     // repetition_penalty: 1.5,
     // num_return_sequences: 1 //must be 1 for greedy search
     // })
 
+    let pipe = pipeline("text-generation", "HuggingFaceH4/zephyr-7b-beta")
+
     // for zephyr customizing
     let MESSAGES = [
       {
@@ -194,25 +190,17 @@ async function runModel(PREPROMPT, PROMPT){
       }
     ]
 
-
-    // let prompt = pipe(tokenizer.apply_chat_template(MESSAGES, {
-    // tokenize: false,
-    // add_generation_prompt: true
-    // })
-
-    let out = await pipe(MESSAGES, {
+    let res = await pipe(MESSAGES, {
       max_new_tokens: 150,
-
-
-
-
-    })
-
+      temperature: 0.7,
+      top_k: 50,
+      top_p: 0.95
+    });
 
-    console.log(
+    console.log(res)
 
-    // var modelResult = await
-    var modelResult = await
+    // var modelResult = await res[0].generated_text
+    var modelResult = await res[0].generated_text[0].content
     console.log(modelResult)
 
    return modelResult
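For readers following along, the snippet below is a minimal, self-contained sketch of what runModel does after this commit; it is not the repository's exact code. It assumes transformers.js v3 (@huggingface/transformers), whose text-generation pipeline accepts chat-style message arrays directly, and it assumes the elided MESSAGES body maps PREPROMPT/PROMPT to system/user roles. It also folds in two fixes the committed code still appears to need: pipeline() returns a Promise and must be awaited, and the assistant reply is the last message in generated_text, not the first.

// Sketch only, under the assumptions stated above.
import { pipeline } from "@huggingface/transformers";

async function runModel(PREPROMPT, PROMPT) {
  // pipeline() returns a Promise; the committed code drops this await
  const pipe = await pipeline("text-generation", "HuggingFaceH4/zephyr-7b-beta");

  const MESSAGES = [
    { role: "system", content: PREPROMPT }, // assumed role mapping
    { role: "user", content: PROMPT },
  ];

  const res = await pipe(MESSAGES, {
    max_new_tokens: 150,
    temperature: 0.7,
    top_k: 50,
    top_p: 0.95,
    do_sample: true, // temperature/top_k/top_p only take effect when sampling
  });

  // res[0].generated_text is the whole conversation with the model's new
  // assistant turn appended, so read the last message, not the first
  const modelResult = res[0].generated_text.at(-1).content;
  console.log(modelResult);
  return modelResult;
}

Whether zephyr-7b-beta actually loads in this environment depends on browser-compatible weights being available for it; per the diff's own comment ("seems to work with default model distilgpt2"), the commented-out 'Xenova/distilgpt2' line is the known-working fallback.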