eaglelandsonce committed on
Commit
acc9eef
·
1 Parent(s): 3713982

Upload 5 files

Browse files
Files changed (6) hide show
  1. .gitattributes +1 -0
  2. Genesis.pdf +0 -0
  3. app.py +92 -0
  4. requirements.txt +7 -0
  5. t_kjv.pdf +3 -0
  6. utils.py +67 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ t_kjv.pdf filter=lfs diff=lfs merge=lfs -text
Genesis.pdf ADDED
Binary file (391 kB). View file
 
app.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from utils import *
3
+
4
+
5
# clear the chat history from streamlit session state
def clear_history():
    """Drop any stored Q&A history so widgets start fresh on the next rerun."""
    # pop with a default is a no-op when the key is absent
    st.session_state.pop('history', None)
9
+
10
+
11
# Streamlit entry point: upload a document, chunk + embed it into a Chroma
# vector store, then answer questions against it with retrieval-augmented QA.
if __name__ == "__main__":
    import os

    # loading the OpenAI api key from .env
    from dotenv import load_dotenv, find_dotenv

    load_dotenv(find_dotenv(), override=True)

    st.subheader('Load a Document and Ask a Question')
    with st.sidebar:
        # text_input for the OpenAI API key (alternative to python-dotenv and .env)
        api_key = st.text_input('OpenAI API Key:', type='password')
        if api_key:
            os.environ['OPENAI_API_KEY'] = api_key

        # file uploader widget
        uploaded_file = st.file_uploader('Upload a file:', type=['pdf', 'docx', 'txt'])

        # chunk size number widget; changing it invalidates any cached history
        chunk_size = st.number_input('Chunk size:', min_value=100, max_value=2048, value=512, on_change=clear_history)

        # k = number of similar chunks retrieved per question
        k = st.number_input('k', min_value=1, max_value=20, value=3, on_change=clear_history)

        # add data button widget
        add_data = st.button('Add Data', on_click=clear_history)

        if add_data:
            if api_key:
                # FIX: dropped the redundant `and add_data` — we are already
                # inside the `if add_data:` branch.
                if uploaded_file:  # if the user browsed a file
                    with st.spinner('Reading, chunking and embedding file ...'):
                        # writing the file from RAM to the current directory on disk
                        bytes_data = uploaded_file.read()
                        file_name = os.path.join('./', uploaded_file.name)
                        with open(file_name, 'wb') as f:
                            f.write(bytes_data)

                        data = load_document(file_name)
                        chunks = chunk_data(data, chunk_size=chunk_size)
                        st.write(f'Chunk size: {chunk_size}, Chunks: {len(chunks)}')

                        tokens, embedding_cost = calculate_embedding_cost(chunks)
                        st.write(f'Embedding cost: ${embedding_cost:.4f}')

                        # creating the embeddings and returning the Chroma vector store
                        vector_store = create_embeddings(chunks)

                        # saving the vector store in the streamlit session state
                        # (to be persistent between reruns)
                        st.session_state.vs = vector_store
                        st.success('File uploaded, chunked and embedded successfully.')
            else:
                st.error("Please provide your OpenAI API key above.....")

    # user's question text input widget
    q = st.text_input('Ask a question about the content of your file:')
    if q:  # if the user entered a question and hit enter
        if 'vs' in st.session_state:  # vector store exists: a file was uploaded, split and embedded
            vector_store = st.session_state.vs
            st.write(f'k: {k}')
            answer = ask_and_get_answer(vector_store, q, k)

            # text area widget for the LLM answer
            st.text_area('LLM Answer: ', value=answer)

            st.divider()

            # if there's no chat history in the session state, create it
            if 'history' not in st.session_state:
                st.session_state.history = ''

            # the current question and answer
            value = f'Q: {q} \nA: {answer}'

            st.session_state.history = f'{value} \n {"-" * 100} \n {st.session_state.history}'
            h = st.session_state.history

            # NOTE(review): this widget uses key='history' while the same
            # session-state key is assigned above; recent Streamlit versions
            # warn/raise on that combination — confirm against the deployed
            # Streamlit release.
            st.text_area(label='Chat History', value=h, key='history', height=400)

    # run the app: streamlit run ./chat_with_documents.py
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ openai
2
+ langchain
3
+ chromadb
4
+ docx2txt
5
+ pypdf
6
+ streamlit
7
+ tiktoken
t_kjv.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d7ce786526557549671d397cdccceb0704cd5c181cb912bcd33693601326ec16
3
+ size 3765976
utils.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from langchain.embeddings import OpenAIEmbeddings
3
+ from langchain.vectorstores import Chroma
4
+
5
+
6
# loading PDF, DOCX and TXT files as LangChain Documents
def load_document(file):
    """Load a PDF, DOCX or TXT file into a list of LangChain Documents.

    Args:
        file: path to the document on disk.

    Returns:
        The list produced by the matching LangChain loader's ``load()``,
        or ``None`` when the extension is not supported.
    """
    import os
    # original bound the basename to an unused `name`; `_` makes that explicit
    _, extension = os.path.splitext(file)
    # FIX: compare case-insensitively so '.PDF' / '.Docx' etc. are accepted
    # instead of silently falling through to the unsupported branch
    extension = extension.lower()

    if extension == '.pdf':
        from langchain.document_loaders import PyPDFLoader
        print(f'Loading {file}')
        loader = PyPDFLoader(file)
    elif extension == '.docx':
        from langchain.document_loaders import Docx2txtLoader
        print(f'Loading {file}')
        loader = Docx2txtLoader(file)
    elif extension == '.txt':
        from langchain.document_loaders import TextLoader
        loader = TextLoader(file)
    else:
        print('Document format is not supported!')
        return None

    data = loader.load()
    return data
28
+
29
+
30
# splitting data in chunks
def chunk_data(data, chunk_size=256, chunk_overlap=20):
    """Split LangChain Documents into overlapping character chunks.

    Args:
        data: list of LangChain Documents to split.
        chunk_size: target characters per chunk.
        chunk_overlap: characters shared between consecutive chunks.

    Returns:
        The list of chunked Documents.
    """
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    return splitter.split_documents(data)
36
+
37
+
38
# create embeddings using OpenAIEmbeddings() and save them in a Chroma vector store
def create_embeddings(chunks):
    """Embed *chunks* with OpenAI and return an in-memory Chroma vector store.

    Reads the OpenAI API key from the environment (``OPENAI_API_KEY``).
    """
    # if you want to use a specific directory for chromadb, pass
    # persist_directory='./mychroma_db' to Chroma.from_documents
    return Chroma.from_documents(chunks, OpenAIEmbeddings())
46
+
47
+
48
def ask_and_get_answer(vector_store, q, k=3):
    """Answer question *q* with retrieval-augmented QA over *vector_store*.

    Args:
        vector_store: a vector store exposing ``as_retriever``.
        q: the user's question.
        k: number of similar chunks to retrieve.

    Returns:
        The LLM's answer string.
    """
    from langchain.chains import RetrievalQA
    from langchain.chat_models import ChatOpenAI

    chat_model = ChatOpenAI(model='gpt-3.5-turbo', temperature=1)
    doc_retriever = vector_store.as_retriever(
        search_type='similarity',
        search_kwargs={'k': k},
    )
    qa_chain = RetrievalQA.from_chain_type(
        llm=chat_model,
        chain_type="stuff",
        retriever=doc_retriever,
    )
    return qa_chain.run(q)
58
+
59
+
60
# calculate embedding cost using tiktoken
def calculate_embedding_cost(texts):
    """Estimate the token count and USD cost of embedding *texts*.

    Args:
        texts: iterable of objects exposing ``page_content`` (LangChain Documents).

    Returns:
        Tuple of (total_tokens, cost_in_usd) at $0.0004 per 1K tokens
        for 'text-embedding-ada-002'.
    """
    import tiktoken
    encoder = tiktoken.encoding_for_model('text-embedding-ada-002')
    total_tokens = sum(len(encoder.encode(doc.page_content)) for doc in texts)
    return total_tokens, total_tokens / 1000 * 0.0004