from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore
from langchain_community.document_loaders import DirectoryLoader, TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter

# On-disk byte store so document embeddings are cached across runs
# (used by CacheBackedEmbeddings below)
cache_store = LocalFileStore("./mxbai_cache_v2/")

# Load all .txt files from the extraction directory
loader = DirectoryLoader('../extracted_files', glob="*.txt", loader_cls=TextLoader, show_progress=True)
docs = loader.load()

# Split documents into overlapping chunks, with sizes measured in tiktoken tokens
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
    chunk_size=256,
    chunk_overlap=64,
)
chunked = text_splitter.split_documents(docs)
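
# Quick sanity check (added; not in the original script): confirm the loader
# and splitter actually produced output before spending time on embedding.
print(f"Loaded {len(docs)} documents; produced {len(chunked)} chunks")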

# Embed with mxbai-embed-large-v1 (loaded via sentence-transformers) on CPU
model_name = "mixedbread-ai/mxbai-embed-large-v1"
model_kwargs = {'device': 'cpu'}
embeddings_model = HuggingFaceEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
)
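
# Hedged sanity check (added; not in the original script): mxbai-embed-large-v1
# produces 1024-dimensional vectors, so a single embed_query call verifies the
# model loaded correctly before embedding the whole corpus.
sample_vec = embeddings_model.embed_query("hello world")
print(f"Embedding dimension: {len(sample_vec)}")  # expected: 1024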

# Wrap the embedder so vectors are cached in cache_store; the namespace keys
# cache entries to this model, avoiding collisions with other embedders
cached_embedder = CacheBackedEmbeddings.from_bytes_store(
    embeddings_model, cache_store, namespace="mixedbread-ai/mxbai-embed-large-v1")

# Build the FAISS index over the chunks; embeddings flow through the cache
db = FAISS.from_documents(chunked, cached_embedder)

# Persist the index so it can be reloaded without re-embedding
db.save_local("mxbai_faiss_index_v2")

print("Embeddings saved to mxbai_faiss_index_v2")
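
# Minimal usage sketch (added; not in the original script): reload the saved
# index and run a similarity search to confirm the on-disk artifacts are
# usable. Recent langchain_community releases require
# allow_dangerous_deserialization=True because the index metadata is stored
# with pickle; the query string below is only an example. Note that mixedbread
# recommends prefixing retrieval queries with
# "Represent this sentence for searching relevant passages: " for best results.
loaded_db = FAISS.load_local(
    "mxbai_faiss_index_v2",
    embeddings_model,
    allow_dangerous_deserialization=True,
)
for hit in loaded_db.similarity_search("example query", k=4):
    print(hit.metadata.get("source", "?"), "->", hit.page_content[:80])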