Commit 5fd064e by Sarah Ciston
Parent(s): 214fdb1

try again w token new auth, model name change

Files changed:
- README.md +3 -1
- sketch.js +7 -6
- tutorial.md +2 -0
README.md CHANGED

@@ -9,10 +9,12 @@ hf_oauth: true
 hf_oauth_scopes:
 - read-repos
 - write-repos
+- inference-api
 models:
 # - gpt-3.5-turbo
 # - Xenova/distilgpt2
-- HuggingFaceH4/zephyr-7b-gemma-v0.1
+# - HuggingFaceH4/zephyr-7b-gemma-v0.1
+- HuggingFaceH4/zephyr-7b-beta
 # - openai-community/gpt2
 # - meta-llama/Meta-Llama-3-70B-Instruct
 # - Xenova/detr-resnet-50
sketch.js CHANGED

@@ -13,13 +13,13 @@ if (!oauthResult) {
 
 // You can use oauthResult.accessToken, oauthResult.accessTokenExpiresAt and oauthResult.userInfo
 // console.log(oauthResult);
-const
-console.log(
+const HFAUTH = oauthResult.accessToken
+console.log(HFAUTH)
 
 // import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
 // import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/+esm';
 import { HfInference } from 'https://esm.sh/@huggingface/inference';
-const inference = new HfInference(
+const inference = new HfInference(HFAUTH);
 
 // PIPELINE MODELS
 // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
@@ -223,12 +223,13 @@ new p5(function (p5) {
 async function runModel(PREPROMPT, PROMPT){
   // inference API version, not working in spaces
 
-  let MODELNAME = "HuggingFaceH4/zephyr-7b-gemma-v0.1"
+  // let MODELNAME = "HuggingFaceH4/zephyr-7b-gemma-v0.1"
+  let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
   // let MODELNAME = "openai-community/gpt2"
   // let MODELNAME = 'mistral_inference'
 
   let out = await inference.textGeneration({
-    accessToken:
+    // accessToken: HFAUTH,
     model: MODELNAME,
     messages: [{
       role: "system",
@@ -237,7 +238,7 @@ async function runModel(PREPROMPT, PROMPT){
       role: "user",
       content: PROMPT
     }],
-    max_new_tokens:
+    max_new_tokens: 150
   });
 
   console.log(out)
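For context beyond the diff: the auth change here passes the OAuth token to the `HfInference` constructor instead of sending a per-request `accessToken` field (now commented out). Below is a minimal, hedged sketch of that client-level pattern, not the committed code: the token and prompt are placeholders, and it uses the `inputs`/`parameters` form of `textGeneration` rather than the `messages` array the commit passes.

```js
import { HfInference } from 'https://esm.sh/@huggingface/inference';

// Placeholder token: in sketch.js this comes from the OAuth flow (oauthResult.accessToken).
const HFAUTH = 'hf_xxx_your_access_token';

// Client-level auth: every request made through `inference` carries the token.
const inference = new HfInference(HFAUTH);

const out = await inference.textGeneration({
  model: 'HuggingFaceH4/zephyr-7b-beta',
  inputs: 'Write a one-sentence greeting.', // placeholder prompt
  parameters: { max_new_tokens: 150 },      // same token budget as the commit
});

console.log(out.generated_text);
```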
tutorial.md CHANGED

@@ -144,6 +144,8 @@ Morgan, Yasmin. 2022. "AIxDesign Icebreakers, Mini-Games & Interactive Exercises
 
 Tutorial 1:
 
+<!-- Play with different models: https://huggingface.co/chat/ -->
+
 #### X. Create a class instance of p5 in `sketch.js`
 
 Because we are going to use several other libraries alongside p5.js, it will be necessary and helpful to use p5.js in "Instance Mode." You may have seen this before in this [Multiple Canvases](https://p5js.org/examples/advanced-canvas-rendering-multiple-canvases/) example.
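As a side note for the tutorial step above ("Create a class instance of p5 in `sketch.js`"): here is a minimal sketch of p5.js Instance Mode, the `new p5(function (p5) { ... })` pattern visible in the sketch.js hunk header. The canvas size and drawing are placeholders, not the tutorial's actual sketch, and it assumes the p5.js library is already loaded on the page (e.g. via a script tag).

```js
// Assumes p5.js is already loaded globally (e.g. via a <script> tag).
// Instance Mode: p5 functions are called on a passed-in instance rather than
// the global scope, so p5.js can coexist with other libraries on the page.
new p5(function (p5) {
  p5.setup = function () {
    p5.createCanvas(400, 200); // placeholder canvas size
  };

  p5.draw = function () {
    p5.background(240);
    p5.circle(p5.width / 2, p5.height / 2, 50); // placeholder drawing
  };
});
```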