Sarah Ciston committed on
Commit 614d0f9 · 1 Parent(s): c0412f7

switch back to inference from pipeline

Files changed (1): sketch.js (+21 -20)
sketch.js CHANGED
@@ -1,8 +1,8 @@
-import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
-// import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/+esm';
-// const inference = new HfInference();
+// import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
+import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/+esm';
+const inference = new HfInference();
 
-let pipe = await pipeline('text-generation', 'mistralai/Mistral-7B-Instruct-v0.2');
+// let pipe = await pipeline('text-generation', 'mistralai/Mistral-7B-Instruct-v0.2');
 // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
 // list of models by task: 'https://huggingface.co/docs/transformers.js/index#supported-tasksmodels'
 
@@ -20,26 +20,27 @@ var PREPROMPT = `Return an array of sentences. In each sentence, fill in the [BL
 var PROMPT = `The [BLANK] works as a [FILL] but wishes for [FILL].`
 
 // Chat completion API
-// const out = await inference.chatCompletion({
-//   model: "mistralai/Mistral-7B-Instruct-v0.2",
-//   // model: "google/gemma-2-9b",
-//   messages: [{ role: "user", content: PREPROMPT + PROMPT }],
-//   max_tokens: 100
-// });
+const out = await inference.chatCompletion({
+  model: "mistralai/Mistral-7B-Instruct-v0.2",
+  // model: "google/gemma-2-9b",
+  messages: [{ role: "user", content: PREPROMPT + PROMPT }],
+  max_tokens: 100
+});
 
 // let out = await pipe(PREPROMPT + PROMPT)
-let out = await pipe(PREPROMPT + PROMPT, {
-  max_new_tokens: 250,
-  temperature: 0.9,
-  // return_full_text: False,
-  repetition_penalty: 1.5,
-  // no_repeat_ngram_size: 2,
-  // num_beams: 2,
-  num_return_sequences: 1
-});
+// let out = await pipe(PREPROMPT + PROMPT, {
+//   max_new_tokens: 250,
+//   temperature: 0.9,
+//   // return_full_text: False,
+//   repetition_penalty: 1.5,
+//   // no_repeat_ngram_size: 2,
+//   // num_beams: 2,
+//   num_return_sequences: 1
+// });
 console.log(out)
 
-var result = await out[0].generated_text
+var result = await out.choices[0].message
+// var result = await out[0].generated_text
 console.log(result);
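
Note on the new code path: chatCompletion calls the hosted Inference API and resolves to a response object, not an array of generations, so the `await` in `var result = await out.choices[0].message` is redundant, and `result` ends up holding a { role, content } message object rather than a string. Below is a minimal sketch of reading the generated text back out, assuming the standard @huggingface/inference chat-completion response shape; the unversioned CDN URL, the optional access token, and the prompt are illustrative stand-ins, not part of the commit.

// Sketch: read the generated text from a chatCompletion response.
import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/inference/+esm';

// Anonymous calls are rate-limited; a Hugging Face access token can be passed in here.
const inference = new HfInference(/* HF_TOKEN */);

const out = await inference.chatCompletion({
  model: 'mistralai/Mistral-7B-Instruct-v0.2',
  messages: [{ role: 'user', content: 'The [BLANK] works as a [FILL] but wishes for [FILL].' }],
  max_tokens: 100,
});

// chatCompletion has already resolved, so no further await is needed;
// choices[0].message is a { role, content } pair, and .content holds the text.
const result = out.choices[0].message.content;
console.log(result);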