sarahciston committed
Commit d6148e1 (1 parent: f0ff9a3)

try with importing tokenizer

Files changed (1): sketch.js (+12 -7)
sketch.js CHANGED
```diff
@@ -1,6 +1,8 @@
 
 // IMPORT LIBRARIES TOOLS
-import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
+import { pipeline, env, AutoTokenizer } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
+
+
 
 // skip local model check
 env.allowLocalModels = false;
@@ -15,20 +17,23 @@ let PREPROMPT = `You're a friendly pirate. Please complete the phrase and fill i
 async function textGenTask(input){
   console.log('text-gen task initiated')
 
-  const pipe = await pipeline('text-generation', 'Xenova/TinyLlama-1.1B-Chat-v1.0')
-
+  let MODEL = 'Xenova/TinyLlama-1.1B-Chat-v1.0'
   // const modelsList = ['Xenova/LaMini-Cerebras-256M', ]
 
+  const pipe = await pipeline('text-generation', MODEL)
+  let tokenizer = await AutoTokenizer.from_pretrained(MODEL)
+
+
   let messages = [
     {"role": "system", "content": PREPROMPT},
     {"role": "user", "content": input}
   ]
 
-  // const prompt = pipe.tokenizer.apply_chat_template(messages, {
-  //   tokenize: false, add_generation_prompt: false,
-  // });
+  const prompt = pipe.tokenizer.apply_chat_template(messages, {
+    tokenize: false, add_generation_prompt: false,
+  });
 
-  var out = await pipe(messages, {
+  var out = await pipe(prompt, {
     max_new_tokens: 256,
     temperature: 0.7,
     do_sample: true,
```
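
For orientation, here is a minimal, self-contained sketch of the flow this commit is building toward, assuming the parts of sketch.js not shown in the diff (the full PREPROMPT string and a caller) look roughly like this. A few details differ from the commit and are assumptions or deliberate changes: the CDN import is left unpinned because the commit's pinned version isn't recoverable from this page, the unused `tokenizer` variable is dropped since the prompt is rendered through `pipe.tokenizer`, and `add_generation_prompt` is set to `true` so the rendered prompt ends with an open assistant turn before generating.

```javascript
// Minimal sketch of the text-generation flow; not the commit verbatim.
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

// skip local model check, as in the commit
env.allowLocalModels = false;

// Stand-in for the PREPROMPT the diff truncates; illustrative only.
const PREPROMPT = `You're a friendly pirate. Please complete the phrase and fill in any blanks.`;

async function textGenTask(input) {
  const MODEL = 'Xenova/TinyLlama-1.1B-Chat-v1.0';
  const pipe = await pipeline('text-generation', MODEL);

  const messages = [
    { role: 'system', content: PREPROMPT },
    { role: 'user', content: input },
  ];

  // Render the chat template to a plain prompt string (tokenize: false).
  // add_generation_prompt: true appends the assistant header so the model
  // continues as the assistant rather than extending the user turn.
  const prompt = pipe.tokenizer.apply_chat_template(messages, {
    tokenize: false,
    add_generation_prompt: true,
  });

  const out = await pipe(prompt, {
    max_new_tokens: 256,
    temperature: 0.7,
    do_sample: true,
  });

  // The text-generation pipeline returns an array of { generated_text }.
  return out[0].generated_text;
}

textGenTask('The treasure is buried under the ___.').then(console.log);
```

With `tokenize: false`, `apply_chat_template` returns the formatted conversation as a single string, which is why the commit switches the call from `pipe(messages, ...)` to `pipe(prompt, ...)`.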