atlury committed
Commit 1b06c14 · verified · 1 parent: 515a8c3

Update index.html

Files changed (1):
  1. index.html +9 -6
index.html CHANGED
@@ -176,7 +176,7 @@
 
     <script type="module">
         import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
-        import * as webllm from 'https://esm.run/@mlc-ai/web-llm';
+        import { ChatModule } from 'https://esm.run/@mlc-ai/web-llm';
 
         env.localModelPath = './models';
 
@@ -233,7 +233,7 @@
         }
 
 
-        async function initializePipelines() {
+        async function initializePipelines() {
            try {
                addLog('System: Initializing pipelines...');
                [sttPipeline, ttsPipeline] = await Promise.all([
@@ -242,10 +242,10 @@
                ]);
 
                addLog('System: Initializing WebLLM...');
-               llmChat = new webllm.ChatModule({
+               llmChat = new ChatModule({
                    model: "TinyLlama-1.1B-Chat-v0.4-q4f16_1-1k"
                });
-               await llmChat.reload();
+               await llmChat.init();
                addLog('System: WebLLM initialized successfully.');
 
                addLog('System: Digital Human Voice Chat initialized. Click "Begin Call" to start.');
@@ -267,7 +267,10 @@
                const transcription = await sttPipeline(audio);
                addLog(`User: ${transcription.text}`);
 
-               const reply = await llmChat.generate(transcription.text);
+               const reply = await llmChat.generate(transcription.text, {
+                   temperature: 0.7,
+                   max_new_tokens: 256
+               });
 
                const botResponse = reply.trim();
                addLog(`Bot: ${botResponse}`);
@@ -281,7 +284,7 @@
                addLog(`System: Error processing speech: ${error.message}`);
            }
        }
-
+
        function addLog(message) {
            const now = new Date();
            const timestamp = now.toLocaleTimeString();
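
For readers following the hunks, the change switches initialization from a namespace import with `llmChat.reload()` to a named `ChatModule` import with `llmChat.init()`, and passes explicit sampling options to `generate()`. The speech handler after this commit boils down to roughly the flow sketched below. This is a minimal reconstruction assembled only from the diff context, not a verbatim excerpt of index.html: the handler name `processSpeech` and the text-to-speech step are assumptions, and `sttPipeline`, `llmChat`, and `addLog` are the page's existing globals.

        // Sketch of the post-commit handler. "processSpeech" is a placeholder
        // name; only the lines shown in the hunks above are confirmed here.
        async function processSpeech(audio) {
            try {
                // Speech-to-text via the Transformers.js pipeline.
                const transcription = await sttPipeline(audio);
                addLog(`User: ${transcription.text}`);

                // Reply generation via WebLLM, now with explicit sampling options.
                const reply = await llmChat.generate(transcription.text, {
                    temperature: 0.7,
                    max_new_tokens: 256
                });
                const botResponse = reply.trim();
                addLog(`Bot: ${botResponse}`);

                // A text-to-speech step via ttsPipeline would follow here
                // (assumed; it falls outside the diff context).
            } catch (error) {
                addLog(`System: Error processing speech: ${error.message}`);
            }
        }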