# Ingest script: embeds PDF documents from ./docs into a persistent Chroma vector store.
import os

import torch
from constants import CHROMA_SETTINGS
from langchain.document_loaders import PDFMinerLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# LaMini-T5 seq2seq checkpoint; only its encoder is used downstream to
# produce document embeddings (see main()).
checkpoint = "MBZUAI/LaMini-T5-738M"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# device_map="auto" lets accelerate place the model on GPU when available;
# float32 keeps CPU execution working (fp16 is unsupported on many CPUs).
model = AutoModelForSeq2SeqLM.from_pretrained(
    checkpoint, device_map="auto", torch_dtype=torch.float32
)

# Directory the Chroma vector store is persisted into.
persist_directory = "db"
def main():
    """Walk ./docs, embed every PDF, and persist the chunks into Chroma.

    Side effects: reads PDF files under "docs", writes/updates the vector
    store in ``persist_directory`` ("db"). Non-PDF files are skipped.
    """

    def embedding_function(text):
        """Mean-pool the T5 encoder's last hidden state into one vector.

        NOTE(review): returns a numpy array; confirm the installed Chroma
        version accepts arrays rather than lists of floats.
        """
        inputs = tokenizer(
            text, return_tensors="pt", truncation=True, max_length=512
        ).to(model.device)
        with torch.no_grad():
            # Mean over the sequence dimension -> one embedding per input.
            return model.encoder(**inputs).last_hidden_state.mean(dim=1).cpu().numpy()

    # Hoisted out of the per-file loop: the splitter is stateless and
    # loop-invariant, so there is no reason to rebuild it for every PDF.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)

    for root, _dirs, files in os.walk("docs"):
        for file in files:
            if not file.endswith(".pdf"):
                continue  # guard clause instead of nesting the whole body
            print(f"Ingesting file: {file}")
            documents = PDFMinerLoader(os.path.join(root, file)).load()
            texts = text_splitter.split_documents(documents)
            # NOTE(review): langchain's Chroma.from_documents conventionally
            # takes the embedder via `embedding=`; `embedding_function=` may be
            # version-specific — verify against the pinned langchain version.
            db = Chroma.from_documents(
                texts,
                embedding_function=embedding_function,
                persist_directory=persist_directory,
                client_settings=CHROMA_SETTINGS,
            )
            db.persist()
            db = None  # drop the reference so the client can flush/close


if __name__ == "__main__":
    main()