Sarah Ciston committed
Commit 1e67923 · 1 Parent(s): a019ddd

try plain inference fillMask

Files changed (2)
  1. README.md +1 -1
  2. sketch.js +35 -20
README.md CHANGED
@@ -6,8 +6,8 @@ colorTo: blue
  sdk: static
  pinned: false
  models:
- - bert-base-uncased
  - distilroberta-base
+ - bert-base-uncased
  hf_oauth: true
  hf_oauth_scopes:
  - read-repos
sketch.js CHANGED
@@ -1,7 +1,7 @@
  // connect to API via module

  // import { AutoTokenizer, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
- import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
+ // import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';

  /// AUTHORIZATION
  // import { textGeneration } from 'https://esm.sh/@huggingface/inference';
@@ -17,11 +17,11 @@ if (!oauthResult) {
  // You can use oauthResult.accessToken, oauthResult.accessTokenExpiresAt and oauthResult.userInfo
  // console.log(oauthResult);
  // window.huggingface.variables.OAUTH_CLIENT_SECRET
- // const HF_TOKEN = oauthResult.accessToken
+ const HF_TOKEN = oauthResult.accessToken
  // console.log(HF_TOKEN)

- // import { HfInference } from 'https://esm.sh/@huggingface/inference';
- // const inference = new HfInference(HF_TOKEN);
+ import { HfInference } from 'https://esm.sh/@huggingface/inference';
+ const inference = new HfInference(HF_TOKEN);

  // PIPELINE MODELS
  // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
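
Note: this hunk trades the (now commented-out) in-browser transformers.js pipeline for the hosted Inference API, authenticating the client with the OAuth access token. For reference, a minimal sketch of a fill-mask call with this client; the model name and prompt are illustrative, and the mask token (`<mask>` vs. `[MASK]`) must match the model:

    import { HfInference } from 'https://esm.sh/@huggingface/inference';

    const inference = new HfInference(HF_TOKEN);  // HF_TOKEN from the OAuth flow above
    // fillMask returns an array of { score, token, token_str, sequence }
    const results = await inference.fillMask({
      model: 'distilroberta-base',
      inputs: 'The goal of life is <mask>.',
    });
    console.log(results[0].sequence);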
@@ -199,6 +199,7 @@ new p5(function (p5) {
  }

  async function displayModel(m){
+ m = str(m)
  let modelDisplay = p5.createElement("p", "Results:");
  await modelDisplay.html(m)
  }
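
Note: inside `new p5(function (p5) {...})` (instance mode), helpers such as `str()` live on the instance rather than the global scope, so the bare call added here would likely throw a ReferenceError. A hedged fix:

    // p5 instance mode keeps helpers on the instance (or use plain JavaScript)
    m = p5.str(m)  // equivalently: m = String(m)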
@@ -207,6 +208,20 @@

  ///// MODEL STUFF

+ async function runModel(PROMPT){
+ // let MODELNAME = 'distilroberta-base'
+
+ let unmasker = await fillMask(PROMPT)
+
+ console.log(unmasker)
+
+ // let res = unmasker(PROMPT, top_k=5)
+
+ var modelResult = [unmasker[0].sequence, unmasker[1].sequence, unmasker[2].sequence]
+
+ return modelResult
+ }
+

  // async function runModel(PREPROMPT, PROMPT){
  // // inference API version
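
Note: the new `runModel` calls a bare `fillMask(PROMPT)`, which is neither imported nor defined anywhere in this diff, so as committed it would throw a ReferenceError. Presumably the call was meant to go through the `inference` client created in the authorization block; a sketch under that assumption, keeping the top-three sequences the committed version extracts:

    async function runModel(PROMPT) {
      // assumption: route the call through the HfInference client from the auth block
      const unmasker = await inference.fillMask({
        model: 'distilroberta-base',  // illustrative; mirrors the commented-out pipeline model
        inputs: PROMPT,               // PROMPT must contain the model's mask token
      });
      // keep the top three filled-in sequences, as in the committed version
      return unmasker.slice(0, 3).map((r) => r.sequence);
    }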
@@ -350,27 +365,27 @@


  // async function runModel(PROMPTS){
- async function runModel(PROMPT){
+ // async function runModel(PROMPT){

- // let MODELNAME = "bert-base-uncased"
- let MODELNAME = 'distilroberta-base'
+ // // let MODELNAME = "bert-base-uncased"
+ // let MODELNAME = 'distilroberta-base'

- let unmasker = await pipeline('fill-mask', MODELNAME)
+ // let unmasker = await pipeline('fill-mask', MODELNAME)

- let res = unmasker(PROMPT, top_k=5)
+ // let res = unmasker(PROMPT, top_k=5)

- var modelResult = res
+ // var modelResult = res

- return modelResult
+ // return modelResult

- // for (let p in PROMPTS){
- // var res = unmasker(p)
- // console.log(res)
+ // // for (let p in PROMPTS){
+ // // var res = unmasker(p)
+ // // console.log(res)

- // var modelResult = res[0].token_str
- // console.log(modelResult)
+ // // var modelResult = res[0].token_str
+ // // console.log(modelResult)

- // resultsArray.push(modelResult)
- // }
- // return resultsArray
- }
+ // // resultsArray.push(modelResult)
+ // // }
+ // // return resultsArray
+ // }
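
Note: the transformers.js version being commented out here had two latent bugs worth recording: the function returned by `pipeline` must itself be awaited, and `top_k=5` is an assignment to an undeclared variable, which throws in module (strict) code rather than passing an option. A corrected sketch, assuming transformers.js v2 (where the fill-mask option is named `topk`):

    import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';

    const unmasker = await pipeline('fill-mask', 'distilroberta-base');
    // options are passed as an object; v2 names the parameter `topk`
    const res = await unmasker('The goal of life is <mask>.', { topk: 5 });
    // res: array of { score, token, token_str, sequence }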
 