timep12345 committed
Commit c002e8b · 1 Parent(s): 5643bbe

Update app.py

Files changed (1): app.py (+15 -7)
app.py CHANGED
@@ -3,9 +3,9 @@ import pandas as pd
 import json
 
 from langchain.document_loaders import DataFrameLoader
-from langchain.text_splitter import CharacterTextSplitter
+from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.llms import HuggingFaceHub
-from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
+from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.vectorstores import Chroma
 from langchain.chains import RetrievalQA
 
@@ -34,13 +34,21 @@ def url_changes(url, pages_to_visit, urls_to_scrape, repo_id):
     result = json.loads(result)
 
     results_df = pd.concat([results_df, pd.DataFrame.from_records([result])])
-
-    loader = DataFrameLoader(results_df, page_content_column="text")
+    results_df.to_csv("./data.csv")
+
+    df = pd.read_csv("./data.csv")
+    loader = DataFrameLoader(df, page_content_column="text")
     documents = loader.load()
-    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+    print(f"{len(documents)} documents loaded")
+
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
     texts = text_splitter.split_documents(documents)
-    embeddings = SentenceTransformerEmbeddings(model_name="jhgan/ko-sroberta-multitask")
-    db = Chroma.from_documents(texts, embeddings)
+    print(f"documents splitted into {len(texts)} chunks")
+
+    embeddings = HuggingFaceEmbeddings(model_name="jhgan/ko-sroberta-multitask")
+
+    persist_directory = './vector_db'
+    db = Chroma.from_documents(texts, embeddings, persist_directory=persist_directory)
     retriever = db.as_retriever()
     llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature":0.1, "max_new_tokens":250})
     global qa
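
For context, a minimal sketch of what this commit enables on a later run: because Chroma.from_documents now receives persist_directory='./vector_db', the vector store survives on disk and can be reopened without re-scraping or re-embedding, and the qa object the hunk declares with `global qa` can then be built from the reloaded retriever. This sketch uses only the legacy langchain APIs app.py already imports; the repo_id value is a placeholder for illustration, and RetrievalQA.from_chain_type is one plausible way to construct the chain, not necessarily the author's.

# Sketch only, not part of the commit: reload the persisted Chroma store
# written by Chroma.from_documents(..., persist_directory='./vector_db').
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.llms import HuggingFaceHub
from langchain.chains import RetrievalQA

# Must match the model used at index time, or the query vectors won't line up.
embeddings = HuggingFaceEmbeddings(model_name="jhgan/ko-sroberta-multitask")

# Passing persist_directory to the constructor reopens the saved collection.
db = Chroma(persist_directory="./vector_db", embedding_function=embeddings)
retriever = db.as_retriever()

# Placeholder repo_id; requires HUGGINGFACEHUB_API_TOKEN in the environment.
llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-Instruct-v0.1",
                     model_kwargs={"temperature": 0.1, "max_new_tokens": 250})

# One plausible way to build the qa object the diff refers to.
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
print(qa.run("What did the scraped pages say?"))  # example query

A side effect worth noting: the to_csv/read_csv round-trip in the hunk also leaves ./data.csv on disk as an inspectable snapshot of the scraped rows between runs.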