marcellopoliti committed
Commit b6bc4e2
1 Parent(s): f0ba710

bug fix video upload

.streamlit/secrets.toml CHANGED
@@ -1,2 +1,3 @@
 password = "brianknowsai"
-OPENAI_API_KEY = "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"
+OPENAI_API_KEY = "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"
+BRIAN_API_KEY="brian_Hun5m3s59XSvopywo"
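Both keys live in .streamlit/secrets.toml, which Streamlit exposes at runtime through st.secrets. A minimal sketch of how a page would typically read them; the widget and variable names here are illustrative, not taken from this commit:

import streamlit as st

# Key names match the secrets.toml entries above.
openai_key = st.secrets["OPENAI_API_KEY"]
brian_key = st.secrets["BRIAN_API_KEY"]

# The pages gate access behind the shared password stored in the same file.
if st.text_input("Password", type="password") == st.secrets["password"]:
    st.success("Unlocked")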
generate_kb.py CHANGED
@@ -83,6 +83,8 @@ def add_links_to_knowledge_base(
     urls: list,
     chunk_size: int = 2_000,
     pdf_optional_link=None,
+    youtube_optional_link=None,
+    video_title=None,
     pdf_title=None,
     embedding_fct=default_embedding_function,
 ):
@@ -95,6 +97,11 @@ def add_links_to_knowledge_base(
         for md in metadatas:
             md["source"] = pdf_optional_link
             md["title"] = pdf_title
+
+    if youtube_optional_link and video_title:
+        for md in metadatas:
+            md["source"] = youtube_optional_link
+            md["title"] = video_title
     cleaned_contents = [
         re.sub(r"\n+", " ", content) for content in contents
     ]  # clean text a bit
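A minimal sketch, not part of the commit, of how the extended signature is meant to be used: a transcript saved as a .txt file is indexed once, and the YouTube URL and title land in each chunk's metadata in place of the PDF fields. The wrapper name and the client/collection arguments are assumptions based on the calling code elsewhere in the repo.

from generate_kb import add_links_to_knowledge_base  # repo-root module


def index_transcript(client, collection_name, transcript_path, youtube_url, video_title):
    """Add one transcript file to an existing knowledge base, tagged with YouTube metadata."""
    return add_links_to_knowledge_base(
        client=client,
        kb_name=collection_name,
        urls=[transcript_path],             # the .txt file produced by the transcriber
        youtube_optional_link=youtube_url,  # ends up in md["source"] for every chunk
        video_title=video_title,            # ends up in md["title"] for every chunk
    )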
pages/manage_knowledge_box.py CHANGED
@@ -16,7 +16,6 @@ open_ai_key = "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"
 
 st.title("Get knowledge boxes")
 
-
 if st.button("Get current knowledge bases"):
     kbs = get_current_knowledge_bases(client=client)
     st.json(kbs)
@@ -53,7 +52,9 @@ if len(st.session_state["df"]) != 0:
     st.text(f"unique urls: {len(unique_df)}")
     st.dataframe(unique_df)
 
-
+#############################
+#### REMOVE A SPLIT #########
+#############################
 st.header("Remove a split")
 id = st.text_input("Insert a split id")
 if st.button("Remove Id from collection"):
@@ -64,6 +65,9 @@ if st.button("Remove Id from collection"):
         st.error(f"id {id} not in kb")
 
 
+#############################
+#### REMOVE URL ############
+#############################
 st.header("Remove url from collection")
 url = st.text_input("remove url")
 if st.button("Remove url from collection"):
@@ -75,6 +79,9 @@ if st.button("Remove url from collection"):
         st.error(str(e))
 
 
+#############################
+########### ADD URL #########
+#############################
 st.header("Add url to existing collection")
 url_text = st.text_input("Insert a url link")
 if st.button("add url to collection"):
@@ -107,7 +114,9 @@ if st.button("add pdf"):
     # Clean up: delete the temporary file
     os.remove(tmp_path)
 
-
+#############################
+########### ADD CSV #########
+#############################
 st.header("Add csv to existing collection")
 uploaded_file = st.file_uploader("Choose a CSV file", type=["csv"])
 df = None
@@ -131,6 +140,8 @@ if uploaded_file is not None:
 #############################
 ########## YOUTUBE ##########
 #############################
+
+
 def transcribe_audio(audio_path, chunk_length=10000):
     """
     Transcribe audio by breaking it into chunks using wave and numpy.
@@ -210,31 +221,35 @@ def download_and_transcribe_youtube(youtube_url):
 
     # audio_file = open("video.wav", "rb")
     text = transcribe_audio("video.wav")
-    st.write(text)
-    # save text
-    # out_path = os.path.join("../data/files", video_title + ".txt")
-    # with open(out_path, "w+") as f_out:
-    #     f_out.write(text["text"])
+    f_out_path = f"{video_title}.txt"
+    with open(f"{video_title}.txt", "w") as f_out:
+        f_out.write(text)
+    urls = [f_out_path]
+    add_links_to_knowledge_base(
+        client=client,
+        kb_name=collection_name,
+        urls=urls,
+        youtube_optional_link=youtube_url,
+        video_title=video_title,
+    )
+    os.remove(f"{video_title}.txt")
+    os.remove("video.wav")
+    os.remove("temp_chunk.wav")
 
 
 st.header("Add youtube video to collection")
-video_url = st.text_input("Youtube video url")
+st.image(
+ "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAVsAAACRCAMAAABaFeu5AAAAwFBMVEX////NIB8AAADKAADMGBf//PzNHRzWUVH23d3LCwjjlZUlJSXLDw2np6fi4uLvx8ZycnLPKSfghoY9PT3no6OPj49oaGicnJzZZmXvwsFPT0/66+uvr6/pqanaamr99vbz0tLprq755+fsurrSPDseHh7WVVTu7u5/f3/hjY3fhIPQMzLX19fAwMDbc3K3t7fQ0NB3d3fXW1rUSEguLi5cXFxERETUSUnlnJsUFBSSkpLdeHj119fSNzdeXl41NTXZRimFAAAOiklEQVR4nO2dfXvSPBTGwZRCeTqRjSnIgPIiE3BMQR0K0+//rZ4WaJs7OUlbXtZ2cv/hddmGEH5L05OTk5NC4VWp1HPVdjWoVDaO49RqtVarNdyq76njquH9s/3f7oZbwi3nlt5UKgPvw14lpbR/SkpyCQ4qLrdhv7GqTq6n8+aivBzN6kXTsC0WqThFbNso1uuz0XLc7M6nk8mq0R+6/CsDl/qrwz7YtDqrybpZXj7XbQ6CS8Hl4Mrcqngq7arzKjbcL7C4v4dlzpbl5nqy6rQ2g7SxHCun0Z0FvWkL8WQED8S+Bb7DXe+unLQBHara3P0NRso0NTINt33dWtqYDlB/xuzscvVl2qzeSBtVQrXqLPtgdzKZ2U8bVwL1Frkh68lk5XbayOLKsey0cSWUwVppQ4unfq467U4my8Wou2JpgzpIbJI2uGjlFG0e4LbyitaFm3FzoZ3DsTYQy/Y8eJE3C4GX8ZQ2Pp1q+R0RPLFh2gA1Ghlp4zlKZj1tgGrl+EW2U4Y7bjnf3dYdcUdpI1Spkvdu63bcTdoQFZpYaaM5WvY6bYgKZdgPHlemnc0lNSf/Q4I7KOzXIUog6udGlzihJvy8wVsGjKskv93kP3iG58QfFN6AbuVfe4Ulzgz3mfupxmI6nV7H0zSBdWHOJuEHJ7PTwzWLux9zC+Ru5F/7Bwq8Py/aNj8kJPJ7JBhLjDH/wXPYfKyyrfoLdsue1Gi8/3gUukgNgW0n/gdLSdiW+U+ehe2+V/wCdg9iowX2Zx4Spvy4mV+29nxX92dg91ls9Ae4/fE4dJECX0J+2foD7leA95/Y6J9w++44dFHqAaH8si2y/ZovPvT3Qpu1d0+tzath6+wqx0FB6JnYqwkz4qQavhq2+6YjPsHIQhPtw7HwIrSyqAbGUcbYWtV97fjYY5u/w723R7KLEpgJeWZrd/e1vwN+X/gvvodb34+GF6EF/M4csw2mJw/q5/4RbhEz4tPqGeafOWZrzvx2AUAwYb+pu/Q5hA7GJHPerLEN3IzoMuC/+JNmKD690Lwt2lNvJ0goLDuEe8MEjrAXYFtkvvMAn/yv4fe+hRvSnO3Uagudz2YgmG9XhO0fCX73i7Ct7KvHNxY3qt6poJ9H2rUys1iKXVavF2Hr+PXDxJabIKgHi7NIG/WRM7bBCIb+73BiC5ffnZ3t8BWxDWwcHBQCHy36FyX348nVSYPt01nYBhMzYfr1zb+K/sXzL15Wdevn+WJrXwdfABQ/+Vc/8ld/nB1tYa0zpHLGdh58ARpbvtuAHinOp67uZ74AW9M04my6jLO7lV+T+4/HuPczooOMcN2W7r98/frldD7d8cFsTV5Rl2m2JmPmaPlsM+0sxHBNaaNe9/YUW9rWckFh4MT9s7sG/sWfIoiHb3/9eze3tOl7+/NjoJ9hv397E17+yA81I11v0LHd72reKaSouEyxNdisMfC+oLSZMhU0F/+44bS9Yr12bfXE1O8HPlIU++ju2l/+0hVQK6Ff9w3t2n1HVwAD0CeufPFAtkLIa3AnNOA9Bc5hgi175rbktsv0eMPsFS6DtyfKTm6aXDkAtXXKoGEGrlucr+0lD8hx2PILdNoh9BC2sIe5qmRrsipUUJgTLZEKeeqtFXszICYMPF7bTgheBnDdosM3kBQXkhO2huWILZeNB6NYEQtt1VLAZVwXByfuR5r2XmCa8RKX0xKy7aXD1jbljbii16hoL+WomH1DDHJ4ZnylQMn7GRAUwg0J71VopRdeQraDVNguDYqaEAZsLBVkXbUtqufCXihA9qAmgJM1QeiGTMhWb7KejS25IQw7rllU9VpPG6rdjB9CYHi9Fd5XITOcZUgCYywXbBV77ZoQ4aOPsm8QDQcbBcyC74J/MUSmHGyDD4ZKyJb8+5+drUJ9rn4rapfuUh5y8buB2j2Ov0EhIfROFu8tS8hWHzP+wmy5wd+0dSOCouX43eDEfQSIgWdMWJwkxC9lJmSr3xD5wmy5ddHIbkvtN8REFTAofAbSYWcUSH6+u7vFIFPe75CQrdY1fla2retmtyF0zjBWgqGN5ky6zXkDr/Wlpgs7+G74n/yDBxAUwanxfiUC579cQFlO2A5NZhs2Yzg5CHZeCLufu8wyvNLQL2XTXGD7+41C4YQLMAaP/zu6cFK28h//Rdh29zMrYT9j0BprxV/202aYDPq5NCgIwRXK91ToKYD3XRAKgp6HnLFtKlw7weiPl4OJAroXrkWnjRi4omJLL05y5tYPunQu2IZrkRbQCr4A4iLCr8VapPFMDLjCSNxA4asfX3fhB2GuFkY2JWSrXYp8AbZGl7/uG2FYecjQNFTNIdnimyrQHV2Ac9/AnC4cQfLGFpwG/usJiXNTMOjP0stMChSk2YYPOTjLOIMAmIcQc8aWrsYG65aLvkbTrC44bCS2pGOWc26Bj0HF9pauLgdsDaoaTKhWVbEV16MkthiJuxf37MO4eqX4XDiJyxtbm2Q7JCsR2YrhORJbYVfOTpzrFizg3+F16M9hbNPrYNsiKxHZLqLYCsF1W/GLhTB1UMU7hpOHf4ltM5ItscjIB+L/k2zpSkS2YtyKzBZnWFvxgfgXtkewBX/NVr/4uxe2VRbE6SRmK62GwQrYhe0xbKXlMFgAu7BVppeJwRaD7t4IgfgXtkrFYSs4cTEQ/8JWqThsBScuBuJf2CoVh62QYwXvXdgqFYstWmF478JWqQvbf5Zt2ms6F7Z7ZZ4ttYn+whareR1s04n9SMa2v2iS6oppHcnM2Be2OrZVZtAS45szxlafxf2Me0mSsI2bVpo8FCo9tunEiP4bbNOJbT4XW+oVmB7bdGLyz8XWubA9G1tqO1p6bNPZA3U4WzigWrITqD0q6bFNae/eoWytaqcRqLMU7VvqZM702Ka2n/cwthHxCdT+kwtbrOZAttZZ2B4cs6Q/kyQ1tgfFLMEe/0A6trCYpoq/PZztc47ZCjlL6FPMdGwhekEVIxpGNCRlq03V9QJs6RhRcAwo42+FrADmc4GQji30TxXbsD8nZXtoLqBTsSWrwfhbPm5cF39L7w/UsX2gYamuJ2V7aA4rJVv6cVaxFTaS7avhMn0VwMeMbAUfI0by+9KxhQV21bssXHZXsIVRm2c7OS1bEzMjRLLlcnoVuP0OC/4q508CK6skvIfxL+JLxxZin7kMSbDBJ4yFptmqD5Sg9sofwVbYmR/Jls
EmPT+DAo7CYQ5j/3yMncR5j0Ukt9GzxYim8DKkIA4RkGyFMFSerXZRJzFbcVVFwTbc0Yh/i2AGrrC18HQ9cb5OHzuvZQvJQYKe+KCA9Y0ofS9srebZap3jOrZoTDKiFxaUbB0/HZiBr/bg6UdDIbwMDgOx6fRhp1q2mMx1//RjT+SCSomEbm8FtMBW68DV5V5j0Mim7T7I8hq2aj9ve8y8wdIQvILBChMOw96qjinXL74q6BVMLVvhgd5u03tAXFxQ6W+x8L2c7IZnq3WExWc7eGZsIbv41Hv8a02DsScHLwaDv4nJuAutJ4tZZYGdtL2MzHilZSvFld/8FC7wnxB3T3wSywpstY4wLVth8t6mJvPa3BTyJ0KDUHq+e20xX630xJFusAi25A40XrdJCgtsC7rDG7Vs6bxoqIR5P7g5BTnHAs2lbehktmA9W+EQGFl8VtGInEwyW93ZjVq21MKfqGRs+QMjIg+akAYzxaHoEWwV+6l9YcpLugy8EIGtLkmrjq0l2gT7MnHmZYo8P/yic2QyIKndwhTPVwTbgpRBlJeQ2I5MePUIL0Rgq5v06tji6Zq+2rH8CQuaGzTFJusPJM95jCZZMIotJqJA/RJGmSuizJ3an6Bd6dPmFqa8/O16vLga8mj4Hi6AsSnJaiciPxQ95Y1mq06/9ktM4UxsBXzU+Gq0EzM9Wzkis+1arHHYjtkT8d6ZivaqOo/VhsgkSk/LYrBVZbu8kTuPlF7Is37VbHWRNRE5sSWryDLi+RjHhj2TDCa5IWwultlrSCVpVbxcY7ClcwuTJ3BhbX+3HVvNVmfg6nO524JPr+P94rixdoaIokLwsmZUUsbSmmwyuYIuZMJWnfNyL6W2+aM4k4+fW+yNCGALe1p1Rhj6naRAEca7TgaL7V0VW3gzee94k3X5rkt2RfcvMBeJlTom+YYwWYHU1QdedJmCN33lLN1fv9WnHd75U+Lf/mh8/5v7AsxjLtngvBq8JF8vG/kkK+u9+8We8hEEwUqsOeIv7xYMbLZ29h+vjRWpmN1C5U6It1eb2oqSCjMhge4fPnx+//7bh8eIUwjePl5dPcY7alK7nQTiWOQ/gsGMebVRnRfDe3BIF3eOAxUQ45ZdrK/XC6ZMk1/cHUHw1F1Pp+tm3W2D6imTfHBZ0BEnYnjMbItZunlzhAzbtqNPKTG8YrZ2vZ8MtEtd2ibnRaaVNkZS2uXIvOj44fYs0sfl50T0okPqSnJeaWZFxtllQForLB/K6JAQtaEkF4rlTU5FOv94LiSElWVJ+t3oOVAmJw47lY4w/rMgM6tvMk/ayKXsizqKKzsS1/pzJcEVmjXl2lSImRIgNU3yC1e7sJYJNfMK14oVVpKuFvmEmwe03mEhaXM6QGwR/cOyoIbO/Z9JmYwOSsigKs+qZatsyrYy60Yg1JF3d2dWBptneDpGqTNTr/dlSKbNmnGiVDMmZ+qtp2Z4Fc00LFa8VhzomXlV+tPRdvnc5s6LT1veMfb2tlXjqpM2oSPV3gwb6/EsDFGwLNveJebydE6IpsfRWzi3uAAJczTurlf9Wl47LKneoOK0hv3Gqjpddxfj8nI0q2OMB4ff+wPsxCVJ27MK5ZexLEuuxDbrs9FTedGcT6urRn9YcyoDabvDa1ap1Ov12oNKZeM4tVrNZd/vNFz81cnk+vp6ul7Pu82F+4coPy2XI0/L0XL5VB6PF4tmtztfT91Sk8nEhdfp94cttwrH2bgMez0xZftFmdD/D12BwpeL+0AAAAAASUVORK5CYII=",
+    width=200,  # Manually adjust the width of the image as per requirement
+)
 
+video_url = st.text_input("Youtube video url")
+st.text("Adding the video can take quite a while. Start it and go take a break.")
 if st.button("Add video"):
     # Create a temporary file
     # Write the uploaded PDF to the temporary file
-    download_and_transcribe_youtube(video_url)
-
-    # tmp_file.write(uploaded_file.getvalue())
-    # tmp_path = tmp_file.name
-    # print("PATH: ", tmp_path)
-    # urls = [tmp_path]
-    # res = add_links_to_knowledge_base(
-    #     client=client,
-    #     kb_name=collection_name,
-    #     urls=urls,
-    #     pdf_optional_link=pdf_optional_link,
-    #     pdf_title=pdf_title,
-    # )
-    # st.write(res)
-    # Clean up: delete the temporary file
+    try:
+        download_and_transcribe_youtube(video_url)
+        st.success("Video Added")
+    except Exception as e:
+        st.error(f"{str(e)}")
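The new branch writes the transcript to {video_title}.txt, indexes it through add_links_to_knowledge_base, and then deletes the intermediate files. Below is a condensed sketch of the same round trip, wrapped in try/finally so the temporary .txt and .wav files are removed even when indexing fails; the helper name is hypothetical, the other names follow the diff above.

import os

from generate_kb import add_links_to_knowledge_base


def index_youtube_transcript(client, collection_name, youtube_url, video_title, text):
    transcript_path = f"{video_title}.txt"
    try:
        with open(transcript_path, "w") as f_out:
            f_out.write(text)
        add_links_to_knowledge_base(
            client=client,
            kb_name=collection_name,
            urls=[transcript_path],
            youtube_optional_link=youtube_url,
            video_title=video_title,
        )
    finally:
        # Clean up the transcript and the audio files left behind by transcription.
        for path in (transcript_path, "video.wav", "temp_chunk.wav"):
            if os.path.exists(path):
                os.remove(path)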
 
 
 
 
 
 
 
 
 
 
services/document_manager/document_loader.py CHANGED
@@ -1,4 +1,4 @@
-from langchain.document_loaders import PyPDFLoader
+from langchain.document_loaders import PyPDFLoader, TextLoader
 import pandas as pd
 from langchain.document_loaders import WebBaseLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
@@ -18,9 +18,6 @@ class DocumentsLoader:
     def is_notion_url(self, url):
         # Regular expressions to match Notion URLs
         return "notion" in url
-        # notion_regex = r"https://(www\.)?(notion\.so|notion\.site)/"
-        # Check if the URL matches the Notion regex
-        # return re.match(notion_regex, url) is not None
 
     def is_pdf_url(self, url):
         # Define a list of common PDF file extensions
@@ -32,6 +29,16 @@ class DocumentsLoader:
                 return True
         return False
 
+    def is_txt_url(self, url):
+        # Define a list of common text file extensions
+        txt_extensions = [".txt"]
+
+        # Check if the URL ends with a text file extension
+        for extension in txt_extensions:
+            if url.endswith(extension):
+                return True
+        return False
+
     def is_valid_url(self, url):
         # TODO: handle status codes not 200
         try:
@@ -42,7 +49,7 @@ class DocumentsLoader:
             return False
 
     def load_docs(self, doc_urls: list) -> list:
-        web_urls, pdf_urls, notion_urls, docs = [], [], [], []
+        web_urls, pdf_urls, notion_urls, text_urls, docs = [], [], [], [], []
         if isinstance(doc_urls[0], list):
            doc_urls = [doc[0] for doc in doc_urls]
            # doc_urls = doc_urls[0]
@@ -56,6 +63,8 @@ class DocumentsLoader:
                pdf_urls.append(url)
            if self.is_notion_url(url):
                notion_urls.append(url)
+           if self.is_txt_url(url):
+               text_urls.append(url)
            else:
                web_urls.append(url)
 
@@ -72,9 +81,6 @@ class DocumentsLoader:
 
        # load pdf urls
        if len(pdf_urls) > 0:
-           # print("n urls", pdf_urls)
-           # pdf_urls = [url for url in pdf_urls if self.is_valid_url(url)]
-           # print("n urls", pdf_urls)
            for pdf_url in pdf_urls:
                try:
                    pdf_loader = PyPDFLoader(pdf_url)
@@ -83,19 +89,15 @@ class DocumentsLoader:
                except Exception as e:
                    print(f"Error pdf loader, {pdf_url}: {str(e)}")
 
-       # notion loader: not working
-       # if len(notion_urls) > 0:
-       #     print("ADDING NOTION URLS")
-       #     notion_urls = [url for url in notion_urls if self.is_notion_url(url)]
-       #     for notion_url in notion_urls:
-       #         print(notion_url)
-       #         try:
-       #             notion_loader = NotionDirectoryLoader(notion_url)
-       #             notion_docs = notion_loader.load()
-       #             print("Notion docs ", notion_docs)
-       #             docs = notion_docs + docs
-       #         except Exception as e:
-       #             print(f"Error notion loader, {notion_url}: {str(e)}")
+       if len(text_urls) > 0:
+           for txt_url in text_urls:
+               try:
+                   txt_loader = TextLoader(txt_url)
+                   txt_docs = txt_loader.load()
+                   docs = docs + txt_docs
+               except Exception as e:
+                   print(f"Error text loader, {txt_url}: {str(e)}")
+
        return docs
 
    def split_docs(self, docs, chunk_size=2000):
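A small self-contained sketch (assuming langchain is installed) of the .txt path that load_docs now takes for transcript files: TextLoader turns the file into Documents, and the same 2000-character splitter used by split_docs produces the chunks that get embedded. The file name is illustrative.

from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

docs = TextLoader("my_video_title.txt").load()        # one Document per transcript file
splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
splits = splitter.split_documents(docs)
print(f"{len(splits)} chunks ready to be embedded into the knowledge base")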
test_marcello.csv DELETED
@@ -1,3 +0,0 @@
-url
-https://en.wikipedia.org/wiki/Dragon_Ball
-https://en.wikipedia.org/wiki/Naruto