Quantize F16 with llamafile-0.8.6
- .gitattributes +1 -0
- aya-23-8B.F16.llamafile +3 -0
.gitattributes
CHANGED
@@ -36,3 +36,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 aya-23-8B.Q6_K.llamafile filter=lfs diff=lfs merge=lfs -text
 aya-23-8B.Q5_K_M.llamafile filter=lfs diff=lfs merge=lfs -text
 aya-23-8B.Q8_0.llamafile filter=lfs diff=lfs merge=lfs -text
+aya-23-8B.F16.llamafile filter=lfs diff=lfs merge=lfs -text
aya-23-8B.F16.llamafile
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eaf60aebc0e817510cdf68292f8031e58faeccead48c003520eb3c11094ff935
+size 16102799504
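
Note that the added file is only a Git LFS pointer, not the roughly 16 GB F16 llamafile itself: the real blob is fetched at download time and identified by the SHA-256 object ID and byte size recorded above. As a minimal sketch (the local file paths are illustrative assumptions, not part of this repo), a downloaded copy could be checked against this pointer like so:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    # A Git LFS pointer is a small text file of "key value" lines,
    # e.g. "oid sha256:<hex>" and "size <bytes>".
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(blob_path: str, pointer_path: str) -> bool:
    # Compare the downloaded blob's size and SHA-256 digest
    # against the values stored in the LFS pointer.
    fields = parse_lfs_pointer(pointer_path)
    expected_size = int(fields["size"])
    expected_sha = fields["oid"].removeprefix("sha256:")

    blob = Path(blob_path)
    if blob.stat().st_size != expected_size:
        return False

    digest = hashlib.sha256()
    with blob.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha

# Example usage (hypothetical paths):
# verify_blob("aya-23-8B.F16.llamafile", "aya-23-8B.F16.llamafile.pointer")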