Cheselle committed on
Commit fbba7ae
1 Parent(s): b2a005f

Create app.py

Files changed (1)
  1. app.py +139 -0
app.py ADDED
@@ -0,0 +1,139 @@
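# Chainlit chat app that answers questions about U.S. AI policy with a LangChain RAG
# pipeline: a NIST AI Risk Management Framework publication (NIST.AI.600-1) and the
# Blueprint for an AI Bill of Rights are chunked, embedded, and indexed in an in-memory
# Qdrant vector store, then retrieved to ground each answer.
# Typical launch command, assuming Chainlit is installed: chainlit run app.py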
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
from langchain.schema.runnable import Runnable
from langchain.schema.runnable.config import RunnableConfig
from typing import cast
import os
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_experimental.text_splitter import SemanticChunker
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_openai.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Qdrant
from langchain_core.runnables import RunnablePassthrough, RunnableParallel
from operator import itemgetter
import chainlit as cl
from openai import AsyncOpenAI
from dotenv import load_dotenv

load_dotenv()

# Set up API key for OpenAI
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")

"""
Sample questions this assistant is designed to answer:

"What is the AI Bill of Rights, and how does it affect the development of AI systems in the U.S.?"

"How is the government planning to regulate AI technologies in relation to privacy and data security?"

"What are the key principles outlined in the NIST AI Risk Management Framework?"

"How will the AI Bill of Rights affect businesses developing AI solutions for consumers?"

"What role does the government play in ensuring that AI is developed ethically and responsibly?"

"How might the outcomes of the upcoming elections impact AI regulation and policy?"

"What are the risks associated with using AI in political campaigns and decision-making?"

"How do the NIST guidelines help organizations reduce bias and ensure fairness in AI applications?"

"How are other countries approaching AI regulation compared to the U.S., and what can we learn from them?"

"What challenges do businesses face in complying with government guidelines like the AI Bill of Rights and NIST framework?"
"""
@cl.on_chat_start
async def on_chat_start():
    model = ChatOpenAI(streaming=True)

    # Define RAG prompt template
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You're a very knowledgeable AI engineer who's good at explaining stuff like ELI5."
            ),
            ("human", "{context}\n\nQuestion: {question}")
        ]
    )

    # Load documents and create retriever
    ai_framework_document = PyMuPDFLoader(file_path="https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf").load()
    ai_blueprint_document = PyMuPDFLoader(file_path="https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf").load()

    def metadata_generator(document, name):
        # Split into fixed-size chunks (500 characters with 100-character overlap)
        fixed_text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=100,
            separators=["\n\n", "\n", ".", "!", "?"]
        )
        collection = fixed_text_splitter.split_documents(document)
        # Tag each chunk with the name of its source document
        for doc in collection:
            doc.metadata["source"] = name
        return collection

    recursive_framework_document = metadata_generator(ai_framework_document, "AI Framework")
    recursive_blueprint_document = metadata_generator(ai_blueprint_document, "AI Blueprint")
    combined_documents = recursive_framework_document + recursive_blueprint_document

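    # Wrap the fine-tuned Arctic sentence model in HuggingFaceEmbeddings so it exposes the
    # embed_documents/embed_query interface that Qdrant.from_documents expects (a raw
    # transformers AutoModel does not). This assumes "Cheselle/finetuned-arctic-sentence"
    # is a sentence-transformers-compatible embedding model.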
    from langchain_community.embeddings import HuggingFaceEmbeddings
    embeddings = HuggingFaceEmbeddings(model_name="Cheselle/finetuned-arctic-sentence")

    # Vector store and retriever
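    # NOTE: location=":memory:" keeps the Qdrant index in RAM only, so the PDFs are
    # re-downloaded, re-chunked, and re-embedded each time a new chat session starts.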
    vectorstore = Qdrant.from_documents(
        documents=combined_documents,
        embedding=embeddings,
        location=":memory:",
        collection_name="AI Policy"
    )

    retriever = vectorstore.as_retriever()

    # Store the model, retriever, and prompt in the session for reuse
    cl.user_session.set("runnable", model)
    cl.user_session.set("retriever", retriever)
    cl.user_session.set("prompt_template", prompt)


@cl.on_message
async def on_message(message: cl.Message):
    # Get the stored model, retriever, and prompt template
    model = cast(ChatOpenAI, cl.user_session.get("runnable"))
    retriever = cl.user_session.get("retriever")
    prompt_template = cl.user_session.get("prompt_template")

    # Log the incoming message content
    print(f"Received message: {message.content}")

    # Retrieve relevant context from the documents based on the user's message
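    # (get_relevant_documents() is the older retriever API; in recent LangChain releases
    # retriever.invoke(message.content) is the preferred equivalent.)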
    relevant_docs = retriever.get_relevant_documents(message.content)
    print(f"Retrieved {len(relevant_docs)} documents.")

    if not relevant_docs:
        print("No relevant documents found.")
        await cl.Message(content="Sorry, I couldn't find any relevant documents.").send()
        return

    context = "\n\n".join([doc.page_content for doc in relevant_docs])

    # Log the retrieved context for debugging
    print(f"Context: {context}")

    # Construct the final RAG prompt
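    # ChatPromptTemplate.format() renders the template (system and human turns) into a single
    # string here; format_messages() could be used instead to keep the chat roles separate.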
    final_prompt = prompt_template.format(context=context, question=message.content)
    print(f"Final prompt: {final_prompt}")

    # Initialize a streaming message
    msg = cl.Message(content="")

    # Stream the response from the model
    async for chunk in model.astream(
        final_prompt,
        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
    ):
        # Extract the content from AIMessageChunk and concatenate it to the message
        await msg.stream_token(chunk.content)

    await msg.send()