Sarah Ciston committed · Commit 37c8ccd · Parent(s): 16186bd

switch from inference to pipeline
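This commit swaps the hosted @huggingface/inference client for an in-browser transformers.js pipeline in sketch.js, comments out the Mistral entry in the Space's model metadata in README.md to match, and removes an invalid size declaration from style.css.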
README.md CHANGED
@@ -7,7 +7,6 @@ sdk: static
 pinned: false
 models:
 - meta-llama/Meta-Llama-3-70B-Instruct
-# - meta-llama/Meta-Llama-3-70B-Instruct
 # - Xenova/detr-resnet-50
 # - Xenova/gpt2
 # - Xenova/bloom-560m
@@ -15,7 +14,7 @@ models:
 # - Xenova/gpt-3.5-turbo
 # - Xenova/llama-68m
 # - Xenova/LaMini-Flan-T5-783M
-- mistralai/Mistral-7B-Instruct-v0.2
+# - mistralai/Mistral-7B-Instruct-v0.2
 # - meta-llama/Meta-Llama-3-8B
 
 ---
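Net effect: meta-llama/Meta-Llama-3-70B-Instruct is now the only active entry under models:, and the Mistral entry joins the other commented-out candidates. In a Space's README frontmatter, the models: list declares which Hub models the Space uses, so this keeps the metadata in step with the model switch in sketch.js below.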
sketch.js CHANGED
@@ -1,8 +1,10 @@
-//
-import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/+esm';
-const inference = new HfInference();
+// connect to API via module
 
-
+import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';
+// import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/+esm';
+// const inference = new HfInference();
+
+// PIPELINE MODELS
 // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
 // list of models by task: 'https://huggingface.co/docs/transformers.js/index#supported-tasksmodels'
 
@@ -12,22 +14,33 @@ const inference = new HfInference();
 
 ///////// VARIABLES
 
+// establish global variables
 
-
+let PROMPT,
+  PREPROMPT,
+  promptResult,
+  submitButton,
+  addButton,
+  promptInput,
+  modelDisplay,
+  modelResult;
 
+// pick a model (see list of models)
+// INFERENCE MODELS
+// let MODELNAME = "mistralai/Mistral-7B-Instruct-v0.2";
+// models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', "meta-llama/Meta-Llama-3-70B-Instruct", 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2', "meta-llama/Meta-Llama-3-70B-Instruct")
 
 // const detector = await pipeline('text-generation', 'meta-llama/Meta-Llama-3-8B', 'Xenova/LaMini-Flan-T5-783M');
 
+
 let blankArray = []
 
-let MODELNAME = 'mistralai/Mistral-7B-Instruct-v0.2'
-// let MODELNAME = "meta-llama/Meta-Llama-3-8B-Instruct"
-// models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', "meta-llama/Meta-Llama-3-70B-Instruct", 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2', "meta-llama/Meta-Llama-3-70B-Instruct")
 
 ///// p5 STUFF
 
+// create an instance of the p5 class as a workspace for all your p5.js code
 
-new p5(function(p5){
+new p5(function (p5) {
   p5.setup = function(){
     console.log('p5 loaded')
     p5.noCanvas()
@@ -180,13 +193,39 @@ new p5(function(p5){
 
 
 async function runModel(PREPROMPT, PROMPT){
-  // Chat completion API
-
-
-
-
-}
+  // // Chat completion API
+
+  // inference API version, not working in spaces
+  // const out = await inference.chatCompletion({
+  //   model: MODELNAME,
+  //   messages: [{ role: "user", content: PREPROMPT + PROMPT }],
+  //   max_tokens: 100
+  // });
+
+  // console.log(out)
+
+  // // modelResult = await out.messages[0].content
+
+  // var modelResult = await out.choices[0].message.content
+  // // var modelResult = await out[0].generated_text
+  // console.log(modelResult);
+
+  // return modelResult
+
+  // pipeline/transformers version TEST
+  let pipe = await pipeline('text-generation', 'meta-llama/Meta-Llama-3-70B-Instruct');
+
+  out = pipe((PREPROMPT, PROMPT), num_return_sequences=3, return_full_text=false)
 
+  console.log(out)
+
+  var modelResult = await out.generated_text
+  console.log(modelResult)
+
+  return modelResult
+
+}
+
 //inference.fill_mask({
 // let out = await pipe(PREPROMPT + PROMPT)
 // let out = await pipe(PREPROMPT + PROMPT, {
@@ -198,16 +237,7 @@ async function runModel(PREPROMPT, PROMPT){
 // // num_beams: 2,
 // num_return_sequences: 1
 // });
-  console.log(out)
-
-  // modelResult = await out.messages[0].content
-
-  var modelResult = await out.choices[0].message.content
-  // var modelResult = await out[0].generated_text
-  console.log(modelResult);
 
-  return modelResult
-}
 
 
 // Reference the elements that we will need
@@ -222,6 +252,7 @@ async function runModel(PREPROMPT, PROMPT){
 // status.textContent = 'Loading model...';
 // const detector = await pipeline('object-detection', 'Xenova/detr-resnet-50');
 
+
 // status.textContent = 'Ready';
 
 // example.addEventListener('click', (e) => {
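As committed, the new pipeline code has a few JavaScript pitfalls: (PREPROMPT, PROMPT) uses the comma operator, so only PROMPT reaches the model; num_return_sequences=3 and return_full_text=false are assignment expressions rather than named options, and since the names are undeclared they throw in a module's strict mode; the pipe() call is never awaited, so out is a Promise; and out.generated_text skips the array indexing that the file's own commented line (out[0].generated_text) shows. Loading meta-llama/Meta-Llama-3-70B-Instruct is also unlikely to work here, since transformers.js runs ONNX weights in the browser and that repository does not ship them. A minimal sketch of how runModel might look, assuming a browser-sized converted model such as Xenova/distilgpt2 from the file's own model list:

    import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';

    async function runModel(PREPROMPT, PROMPT) {
      // load the model here for simplicity; caching the pipeline in a
      // module-level variable would avoid re-downloading the weights
      const pipe = await pipeline('text-generation', 'Xenova/distilgpt2');

      // generation options go in a single object, and the call must be awaited
      const out = await pipe(PREPROMPT + PROMPT, {
        max_new_tokens: 100,
        num_return_sequences: 1
      });

      // the pipeline resolves to an array of { generated_text } objects
      const modelResult = out[0].generated_text;
      console.log(modelResult);
      return modelResult;
    }

With num_return_sequences above 1, out would hold several candidate objects; out[0] picks the first.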
style.css CHANGED
@@ -99,8 +99,6 @@ canvas {
 
 input {
   margin: 1rem;
-  size: 200;
-  /* position: relative; */
   font-size: 12pt;
 }
 
@@ -108,7 +106,6 @@ button {
   float: left;
   font-size: 16px;
   margin: 1rem;
-  /* size: 200; */
   padding: 1rem 2rem;
   cursor: pointer;
   border-radius: 4px;
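Dropping size: 200 is the right call: size is an HTML attribute on input elements, not a CSS property, so browsers ignored the declaration. If a wider input was the intent, a CSS width rule (for example width: 200px) would be the standard equivalent.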