update readme
README.md
CHANGED
@@ -1335,13 +1335,12 @@ int8 embeddings keep 99.7-100% of the search quality, while reducing your vector
Below example using [usearch](https://github.com/unum-cloud/usearch) to search on int8 embeddings.

```python
-#Run: pip install cohere datasets numpy usearch
from datasets import load_dataset
import numpy as np
import cohere
from usearch.index import Index

-co = cohere.Client("
+co = cohere.Client("<<YOUR_API_KEY>>") # Add your cohere API key from www.cohere.com
lang = "simple"

#Load at max 1000 chunks + embeddings
@@ -1352,11 +1351,13 @@ docs = []
doc_embeddings = []
for doc in docs_stream:
    docs.append(doc)
-    doc_embeddings.append(doc['
+    doc_embeddings.append(doc['emb_int8'])
    if len(docs) >= max_docs:
        break

-doc_embeddings = np.asarray(doc_embeddings)
+doc_embeddings = np.asarray(doc_embeddings, dtype='int8')
+
+print(doc_embeddings.shape, doc_embeddings.dtype)

#Create the usearch HNSW index on the int8 embeddings
num_dim = 1024
@@ -1374,9 +1375,8 @@ matches = index.search(query_emb, 10)
# Print results
for match in matches:
    doc_id = match.key
-
-    print(
-    print(row['text'])
+    print(docs[doc_id]['title'])
+    print(docs[doc_id]['text'])
    print("----")
```

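The hunks above only touch fragments of the usearch example, so here is a minimal end-to-end sketch of how the corrected int8 pieces fit together. It is not part of the diff: the dataset id, the Cohere model and embed parameters, the index settings (`metric='ip'`, `dtype='i8'`), and the example query are assumptions inferred from the surrounding snippet rather than lines taken from the README.

```python
# pip install cohere datasets numpy usearch
# Minimal sketch (not part of the diff): end-to-end int8 search with usearch.
# Assumptions: the dataset id, Cohere model/embed parameters, and index settings
# are inferred from the surrounding snippet; adjust them to what the README uses.
from datasets import load_dataset
import numpy as np
import cohere
from usearch.index import Index

co = cohere.Client("<<YOUR_API_KEY>>")  # Add your cohere API key from www.cohere.com
lang = "simple"
max_docs = 1000

# Stream at most max_docs chunks together with their precomputed int8 embeddings
docs_stream = load_dataset("Cohere/wikipedia-2023-11-embed-multilingual-v3-int8-binary",
                           lang, split="train", streaming=True)
docs, doc_embeddings = [], []
for doc in docs_stream:
    docs.append(doc)
    doc_embeddings.append(doc['emb_int8'])
    if len(docs) >= max_docs:
        break

doc_embeddings = np.asarray(doc_embeddings, dtype='int8')

# HNSW index over the int8 vectors, inner-product metric
num_dim = 1024
index = Index(ndim=num_dim, metric='ip', dtype='i8')
index.add(np.arange(len(docs)), doc_embeddings)

# Embed the query as int8 and search
query = "Who founded Wikipedia?"
response = co.embed(texts=[query], model="embed-multilingual-v3.0",
                    input_type="search_query", embedding_types=["int8"])
query_emb = np.asarray(response.embeddings.int8[0], dtype='int8')

matches = index.search(query_emb, 10)
for match in matches:
    doc_id = match.key
    print(docs[doc_id]['title'])
    print(docs[doc_id]['text'])
    print("----")
```

int8 storage is 4x smaller per dimension than float32, which is the memory saving the surrounding README text trades against the small loss in search quality.
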
@@ -1392,7 +1392,7 @@ import numpy as np
import cohere
import faiss

-co = cohere.Client("
+co = cohere.Client("<<YOUR_API_KEY>>") # Add your cohere API key from www.cohere.com
lang = "simple"

#Load at max 1000 chunks + embeddings
@@ -1407,7 +1407,7 @@ for doc in docs_stream:
    if len(docs) >= max_docs:
        break

-doc_embeddings = np.asarray(doc_embeddings)
+doc_embeddings = np.asarray(doc_embeddings, dtype='uint8')

#Create the faiss IndexBinaryFlat index
num_dim = 1024
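For the faiss section the diff only changes the `asarray` dtype, so below is a minimal sketch of the binary-search flow that change belongs to. The dataset id, the `emb_ubinary` field, the Cohere embed parameters, and the example query are assumptions mirroring the int8 example above; `faiss.IndexBinaryFlat` itself expects packed `uint8` rows of `num_dim / 8` bytes, which is why the dtype matters.

```python
# pip install cohere datasets numpy faiss-cpu
# Minimal sketch (not part of the diff): end-to-end binary search with faiss.
# Assumptions: the dataset id, the 'emb_ubinary' field, and the Cohere embed
# parameters mirror the int8 example; adjust them to what the README uses.
from datasets import load_dataset
import numpy as np
import cohere
import faiss

co = cohere.Client("<<YOUR_API_KEY>>")  # Add your cohere API key from www.cohere.com
lang = "simple"
max_docs = 1000

# Stream at most max_docs chunks together with their packed binary embeddings
docs_stream = load_dataset("Cohere/wikipedia-2023-11-embed-multilingual-v3-int8-binary",
                           lang, split="train", streaming=True)
docs, doc_embeddings = [], []
for doc in docs_stream:
    docs.append(doc)
    doc_embeddings.append(doc['emb_ubinary'])
    if len(docs) >= max_docs:
        break

# IndexBinaryFlat expects uint8 rows of num_dim / 8 bytes (1024 bits -> 128 bytes)
doc_embeddings = np.asarray(doc_embeddings, dtype='uint8')

num_dim = 1024
index = faiss.IndexBinaryFlat(num_dim)
index.add(doc_embeddings)

# Embed the query as packed binary ('ubinary') and rank by Hamming distance
query = "Who founded Wikipedia?"
response = co.embed(texts=[query], model="embed-multilingual-v3.0",
                    input_type="search_query", embedding_types=["ubinary"])
query_emb = np.asarray(response.embeddings.ubinary, dtype='uint8')

hamming_dist, doc_ids = index.search(query_emb, 10)
for doc_id in doc_ids[0]:
    print(docs[doc_id]['title'])
    print(docs[doc_id]['text'])
    print("----")
```

Binary embeddings pack each dimension into a single bit, so the index above stores 128 bytes per 1024-dimensional vector and ranks candidates by Hamming distance.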