atlury committed on
Commit
a8cfe04
1 Parent(s): 3a76a4e

Update index.html

Files changed (1)
  1. index.html +4 -16
index.html CHANGED
@@ -3,7 +3,7 @@
  <head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
- <title>llama-cpp-wasm single thread</title>
+ <title>Edge LLM v1</title>
  <link rel="icon" type="image/png" href="favicon.png" />

  <!-- picocss -->
@@ -16,30 +16,18 @@
  <body>
  <header class="container">
  <hgroup>
- <h1><a href="/">llama-cpp-wasm</a> &nbsp; &#128034; <mark>single thread</mark> wasm32 </h2>
- <br />
-
- <p> WebAssembly (Wasm) Build and Bindings for <a href="https://github.com/ggerganov/llama.cpp" target="_blank">llama.cpp</a>. </p>
- <br />
-
- <p> This demonstration enables you to run LLM models directly in your browser utilizing JavaScript, WebAssembly, and llama.cpp. </p>
- <br />
-
- <p> Repository: <a href="https://github.com/tangledgroup/llama-cpp-wasm"> https://github.com/tangledgroup/llama-cpp-wasm </a></p>
- <br />
-
  <p> When you click <b>Run</b>, model will be first downloaded and cached in browser. </p>
  </hgroup>
  </header>

  <main class="container">
  <section>
- <h2> Demo </h2>
+ <h2> Single Thread </h2>

  <label> Model: </label>

  <select id="model" name="model" aria-label="Select model" required>
- <!-- <option value="https://huggingface.co/Qwen/Qwen1.5-0.5B-Chat-GGUF/resolve/main/qwen1_5-0_5b-chat-q3_k_m.gguf" selected>Qwen/Qwen1.5-0.5B-Chat Q3_K_M (350 MB)</option> -->
+ <option value="https://huggingface.co/Qwen/Qwen1.5-0.5B-Chat-GGUF/resolve/main/qwen1_5-0_5b-chat-q3_k_m.gguf" selected>Qwen/Qwen1.5-0.5B-Chat Q3_K_M (350 MB)</option>
  <option value="https://huggingface.co/afrideva/TinyMistral-248M-SFT-v4-GGUF/resolve/main/tinymistral-248m-sft-v4.q8_0.gguf">tinymistral-248m-sft-v4 q8_0 (265.26 MB)</option>
  <option value="https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf">TinyLlama/TinyLlama-1.1B-Chat-v1.0 Q4_K_M (669 MB)</option>
  <option value="https://huggingface.co/Qwen/Qwen1.5-1.8B-Chat-GGUF/resolve/main/qwen1_5-1_8b-chat-q3_k_m.gguf">Qwen/Qwen1.5-1.8B-Chat Q3_K_M (1.02 GB)</option>
@@ -50,7 +38,7 @@

  <label> Prompt: </label>

- <textarea id="prompt" name="prompt" rows="5">Suppose Alice originally had 3 apples, then Bob gave Alice 7 apples, then Alice gave Cook 5 apples, and then Tim gave Alice 3x the amount of apples Alice had. How many apples does Alice have now? Let’s think step by step.</textarea>
+ <textarea id="prompt" name="prompt" rows="5">Good morning how are you doing</textarea>

  <label> Result: </label>

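
Note on the header line kept by this commit ("When you click Run, model will be first downloaded and cached in browser"): the sketch below shows one way that download-then-cache pattern can be implemented with the standard browser Cache API. The helper name and cache name are illustrative assumptions, not code from this commit; the demo itself delegates model loading to llama-cpp-wasm, whose internal caching may differ.

// Sketch only: a hypothetical fetchModelCached() helper, not the demo's actual loader.
// It uses the standard browser Cache API (caches.open / cache.match / cache.put).
async function fetchModelCached(modelUrl) {
  const cache = await caches.open('model-cache'); // illustrative cache name

  // Repeat runs: serve the previously downloaded model from the cache.
  const cached = await cache.match(modelUrl);
  if (cached) {
    return new Uint8Array(await cached.arrayBuffer());
  }

  // First run: download the GGUF file and store a copy for next time.
  const response = await fetch(modelUrl);
  if (!response.ok) {
    throw new Error(`Model download failed: ${response.status}`);
  }
  await cache.put(modelUrl, response.clone());
  return new Uint8Array(await response.arrayBuffer());
}

// Usage with the option this commit makes the default selection:
// const modelBytes = await fetchModelCached(
//   'https://huggingface.co/Qwen/Qwen1.5-0.5B-Chat-GGUF/resolve/main/qwen1_5-0_5b-chat-q3_k_m.gguf'
// );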