Update README.md
README.md
CHANGED
@@ -50,6 +50,12 @@ quantized_by: Second State Inc.
   ```
 
 - Context size: `4096`
+
+- Quick Start
+
+  ```
+  bash <(curl -sSfL 'https://raw.githubusercontent.com/LlamaEdge/LlamaEdge/main/run-llm.sh') --model llama-3-8b-instruct
+  ```
 
 - Run as LlamaEdge service
 
@@ -57,7 +63,7 @@ quantized_by: Second State Inc.
   wasmedge --dir .:. --nn-preload default:GGML:AUTO:Meta-Llama-3-8B-Instruct-Q5_K_M.gguf \
     llama-api-server.wasm \
     --prompt-template llama-3-chat \
-    --
+    --ctx-size 4096 \
     --model-name Llama-3-8b
   ```
 <!--
@@ -67,3 +73,5 @@ quantized_by: Second State Inc.
   wasmedge --dir .:. --nn-preload default:GGML:AUTO:Llama-2-7b-chat-hf-Q5_K_M.gguf llama-chat.wasm -p llama-2-chat
   ``` -->
 
+
+
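Once the updated service command is running, the model can be exercised end to end. A minimal smoke test is sketched below; it assumes llama-api-server's usual OpenAI-compatible `/v1/chat/completions` route on the default port `8080`, and the `Llama-3-8b` name set via `--model-name` above. Adjust both if the server is started with a different socket address or model name.

```bash
# Sketch of a smoke test against the LlamaEdge API server started above.
# Assumes the default listen address (port 8080) and the model name
# "Llama-3-8b" from the --model-name flag; adjust if they differ.
curl -X POST http://localhost:8080/v1/chat/completions \
  -H 'Content-Type: application/json' \
  -d '{
        "model": "Llama-3-8b",
        "messages": [
          {"role": "system", "content": "You are a helpful assistant."},
          {"role": "user", "content": "What is the capital of France?"}
        ]
      }'
```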