# GPT Chatbot

# Create Conda virtual environment
# conda create --name gpt_chatbot  python=3.9.4
# conda activate gpt_chatbot

# Installation
# pip install streamlit pypdf2 langchain python-dotenv faiss-cpu openai huggingface_hub
# pip install tiktoken 

# pip install InstructorEmbedding sentence_transformers

# tiktoken is required by OpenAIEmbeddings; without it LangChain fails with
# "Could not import tiktoken python package". Install it with `pip install tiktoken`.
# Run the app with the following command in the Anaconda / VS Code terminal:
# streamlit run app.py
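# The app reads API keys from a local .env file via python-dotenv. A minimal .env,
# assuming the standard variable names LangChain expects:
# OPENAI_API_KEY=sk-...
# HUGGINGFACEHUB_API_TOKEN=hf_...
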
import os
import time
from loguru import logger

import streamlit as st
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS # FAISS instead of PineCone
from langchain.llms import OpenAI
from langchain.llms import HuggingFaceHub
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css, bot_template, user_template 

os.environ["TZ"] = "Asia/Shanghai"
try:
    time.tzset()
except AttributeError:
    # time.tzset() only exists on Unix; on Windows the default timezone is kept.
    logger.warning("time.tzset() is unavailable on this platform; skipping")


def get_pdf_text(pdf_docs):
    """Concatenate the extracted text of every page of every uploaded PDF."""
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() can return None for image-only pages
            text += page.extract_text() or ""
    return text

def get_text_chunks(text):
    """Split the raw text into overlapping ~1000-character chunks for embedding."""
    text_splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len
    )
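    # With chunk_size=1000 and chunk_overlap=200, consecutive chunks share roughly
    # 200 characters, so a sentence that straddles a boundary survives intact in at
    # least one chunk. As a rough (hypothetical) illustration, a 2,500-character
    # text would split into chunks near [0, 1000), [800, 1800), [1600, 2500);
    # exact boundaries vary since splits only happen at the "\n" separator.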
    chunks = text_splitter.split_text(text)
    return chunks

def get_vectorstore(text_chunks):
    """Embed the text chunks and index them in an in-memory FAISS store."""
    # embeddings = OpenAIEmbeddings()
    # Larger Instructor variants ("hkunlp/instructor-xl", "hkunlp/instructor-large")
    # rank higher on the MTEB leaderboard but take longer to download and load;
    # instructor-base is the lightweight default here.
    model_name = "hkunlp/instructor-base"
    logger.info(f"Loading {model_name}")
    embeddings = HuggingFaceInstructEmbeddings(model_name=model_name)
    logger.info(f"Done loading {model_name}")

    logger.info("Building FAISS index with FAISS.from_texts")
    vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
    logger.info("Done building FAISS index")
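    # Sanity check (hypothetical query): the index can be probed directly with
    #   docs = vectorstore.similarity_search("What does the journal conclude?", k=4)
    # which returns the 4 chunks whose embeddings are nearest to the query's.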

    return vectorstore


def get_conversation_chain(vectorstore):
    """Wire the LLM, retriever, and chat memory into a conversational RAG chain."""
    llm = OpenAI()
    #llm = ChatOpenAI()
    #llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
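    # ConversationalRetrievalChain first condenses the new question plus the stored
    # chat_history into a standalone question, retrieves the closest chunks from the
    # vectorstore, then asks the LLM to answer using those chunks as context.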
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory
    )
    return conversation_chain


def handle_userinput(user_question):
    # st.session_state.conversation holds the retrieval chain built from our vectorstore and memory.
    response = st.session_state.conversation({'question': user_question})
    # st.write(response)
    st.session_state.chat_history = response['chat_history']

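    # chat_history alternates HumanMessage / AIMessage, so even indices are the
    # user's turns and odd indices are the bot's replies.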
    for i, message in enumerate(st.session_state.chat_history):
        if i % 2 == 0:
            st.write(user_template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
        else:
            st.write(bot_template.replace("{{MSG}}", message.content), unsafe_allow_html=True)


def main():
    load_dotenv()
    st.set_page_config(page_title="Chat with multiple law journal PDFs",
                       page_icon=":books:")
    
    st.write(css, unsafe_allow_html=True)
    
    if "conversation" not in st.session_state:
        st.session_state.conversation = None

    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None
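    # Streamlit re-runs this whole script on every interaction; st.session_state is
    # what lets the conversation chain and chat history survive those re-runs
    # instead of being rebuilt each time.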
    
    st.header("Chat with multiple PDFs :books:")

    user_question = st.text_input("Ask a question about your documents:")
    if user_question:
        handle_userinput(user_question)

    #st.write(user_template.replace("{{MSG}}", "hello robot"), unsafe_allow_html=True)
    #st.write(bot_template.replace("{{MSG}}", "hello human"), unsafe_allow_html=True)

    # "https://i.ibb.co/rdZC7LZ/Photo-logo-1.png"
    # "https://huggingface.co/spaces/gli-mrunal/GPT_instruct_chatbot/blob/main/images/bot.jpg" 
    # "https://huggingface.co/spaces/gli-mrunal/GPT_instruct_chatbot/blob/main/images/CSUN_Matadors_logo.svg.png"

    with st.sidebar:
        st.subheader("Your documents")

        pdf_docs = st.file_uploader(
            "Upload your PDFs here and click 'Process'", accept_multiple_files=True)
        if st.button("Process"):
            with st.spinner("Processing"):
                # ---------------   get pdf text  -------------------

                raw_text = get_pdf_text(pdf_docs)
                #st.write(raw_text)

                # ----------   get the text chunks  -------------------------

                text_chunks = get_text_chunks(raw_text)
                #st.write(text_chunks)


                # --------------   create vector store------------------------
                # https://openai.com/pricing  --> Embedding Models
                # instructor-xl was chosen because it ranks higher than OpenAI's
                # embeddings on the Hugging Face MTEB leaderboard:
                # https://huggingface.co/spaces/mteb/leaderboard

                logger.info("Start get_vectorstore")
                vectorstore = get_vectorstore(text_chunks)
                logger.info("Done get_vectorstore")

                logger.info("Start create conversation chain")
                # create conversation chain
                st.session_state.conversation = get_conversation_chain(vectorstore)
                #conversation = get_conversation_chain(vectorstore)
                logger.info("Done create conversation chain")
                
         

if __name__ == '__main__':
    main()