import os
from langchain.chains import ConversationalRetrievalChain
from langchain_community.vectorstores import Vectara
from langchain_openai import OpenAI
from langchain_community.document_loaders import TextLoader
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
vectara = Vectara.from_documents(documents, embedding=None)
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
openai_api_key = os.environ["OPENAI_API_KEY"]
llm = OpenAI(openai_api_key=openai_api_key, temperature=0)
retriever = vectara.as_retriever()
d = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson", k=2
)
print(d)
bot = ConversationalRetrievalChain.from_llm(
llm, retriever, memory=memory, verbose=False
)
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query})
result["answer"]
query = "Did he mention who she suceeded"
result = bot.invoke({"question": query})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
OpenAI(temperature=0), vectara.as_retriever()
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
chat_history = [(query, result["answer"])]
query = "Did he mention who she suceeded"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
llm, vectara.as_retriever(), return_source_documents=True
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["source_documents"][0]
from langchain.chains import LLMChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.question_answering import load_qa_chain
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_chain(llm, chain_type="map_reduce")
chain = ConversationalRetrievalChain(
retriever=vectara.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = chain({"question": query, "chat_history": chat_history})
result["answer"]
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
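# Hedged sketch (assumed continuation): swap in the sources-aware QA chain,
# mirroring the ConversationalRetrievalChain construction above.
doc_chain = load_qa_with_sources_chain(llm, chain_type="map_reduce")
chain = ConversationalRetrievalChain(
    retriever=vectara.as_retriever(),
    question_generator=question_generator,
    combine_docs_chain=doc_chain,
)
result = chain({"question": query, "chat_history": chat_history})
result["answer"]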
get_ipython().run_line_magic('pip', 'install --upgrade --quiet promptlayer --upgrade')
import promptlayer # Don't forget this 🍰
from langchain.callbacks import PromptLayerCallbackHandler
from langchain.schema import (
HumanMessage,
)
from langchain_openai import ChatOpenAI
chat_llm = ChatOpenAI(
temperature=0,
callbacks=[PromptLayerCallbackHandler(pl_tags=["chatopenai"])],
)
llm_results = chat_llm(
[
HumanMessage(content="What comes after 1,2,3 ?"),
HumanMessage(content="Tell me another joke?"),
]
)
print(llm_results)
import promptlayer # Don't forget this 🍰
from langchain.callbacks import PromptLayerCallbackHandler
from langchain_community.llms import GPT4All
model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8)
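# Hedged usage sketch: pass the PromptLayer callback per call so the GPT4All
# completion is logged; the "gpt4all" tag is illustrative.
response = model(
    "Once upon a time, ",
    callbacks=[PromptLayerCallbackHandler(pl_tags=["gpt4all"])],
)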
get_ipython().system('pip install --upgrade langchain langchain-google-vertexai')
project: str = "PUT_YOUR_PROJECT_ID_HERE" # @param {type:"string"}
endpoint_id: str = "PUT_YOUR_ENDPOINT_ID_HERE" # @param {type:"string"}
location: str = "PUT_YOUR_ENDPOINT_LOCAtION_HERE" # @param {type:"string"}
from langchain_google_vertexai import (
GemmaChatVertexAIModelGarden,
GemmaVertexAIModelGarden,
)
llm = GemmaVertexAIModelGarden(
endpoint_id=endpoint_id,
project=project,
location=location,
)
output = llm.invoke("What is the meaning of life?")
print(output)
from langchain_core.messages import HumanMessage
llm = GemmaChatVertexAIModelGarden(
endpoint_id=endpoint_id,
project=project,
location=location,
)
message1 = HumanMessage(content="How much is 2+2?")
answer1 = llm.invoke([message1])
print(answer1)
message2 = HumanMessage(content="How much is 3+3?")
answer2 = llm.invoke([message1, answer1, message2])
print(answer2)
answer1 = llm.invoke([message1], parse_response=True)
print(answer1)
answer2 = llm.invoke([message1, answer1, message2], parse_response=True)
print(answer2)
get_ipython().system('mkdir -p ~/.kaggle && cp kaggle.json ~/.kaggle/kaggle.json')
get_ipython().system('pip install keras>=3 keras_nlp')
from langchain_google_vertexai import GemmaLocalKaggle
keras_backend: str = "jax" # @param {type:"string"}
model_name: str = "gemma_2b_en" # @param {type:"string"}
llm = GemmaLocalKaggle(model_name=model_name, keras_backend=keras_backend)
output = llm.invoke("What is the meaning of life?", max_tokens=30)
print(output)
from langchain_google_vertexai import GemmaChatLocalKaggle
keras_backend: str = "jax" # @param {type:"string"}
model_name: str = "gemma_2b_en" # @param {type:"string"}
llm = GemmaChatLocalKaggle(model_name=model_name, keras_backend=keras_backend)
from langchain_core.messages import HumanMessage
message1 = HumanMessage(content="Hi! Who are you?")
answer1 = llm.invoke([message1], max_tokens=30)
print(answer1)
message2 = HumanMessage(content="What can you help me with?")
answer2 = llm.invoke([message1, answer1, message2], max_tokens=60)
print(answer2)
answer1 = llm.invoke([message1], max_tokens=30, parse_response=True)
print(answer1)
answer2 = llm.invoke([message1, answer1, message2], max_tokens=60, parse_response=True)
print(answer2)
from langchain_google_vertexai import GemmaChatLocalHF, GemmaLocalHF
hf_access_token: str = "PUT_YOUR_TOKEN_HERE" # @param {type:"string"}
model_name: str = "google/gemma-2b" # @param {type:"string"}
llm = GemmaLocalHF(model_name="google/gemma-2b", hf_access_token=hf_access_token)
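# Hedged usage sketch, mirroring the Kaggle example above; max_tokens is
# assumed to be supported by the Hugging Face variant as well.
output = llm.invoke("What is the meaning of life?", max_tokens=50)
print(output)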
get_ipython().run_line_magic('pip', 'install --upgrade --quiet polars')
import polars as pl
df = pl.read_csv("example_data/mlb_teams_2012.csv")
df.head()
from langchain_community.document_loaders import PolarsDataFrameLoader
loader = PolarsDataFrameLoader(df, page_content_column="Team")
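# Hedged usage sketch: each dataframe row becomes a Document, with the "Team"
# column as page_content and the remaining columns as metadata.
docs = loader.load()
docs[0]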
from langchain_community.chat_models import ChatDatabricks
from langchain_core.messages import HumanMessage
from mlflow.deployments import get_deploy_client
client = get_deploy_client("databricks")
secret = "secrets/<scope>/openai-api-key" # replace `<scope>` with your scope
name = "my-chat" # rename this if my-chat already exists
client.create_endpoint(
name=name,
config={
"served_entities": [
{
"name": "my-chat",
"external_model": {
"name": "gpt-4",
"provider": "openai",
"task": "llm/v1/chat",
"openai_config": {
"openai_api_key": "{{" + secret + "}}",
},
},
}
],
},
)
chat = ChatDatabricks(
target_uri="databricks",
endpoint=name,
temperature=0.1,
)
chat([HumanMessage(content="hello")])
from langchain_community.embeddings import DatabricksEmbeddings
embeddings = DatabricksEmbeddings(endpoint="databricks-bge-large-en")
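# Hedged usage sketch: embed a query and documents with the served BGE model.
query_vector = embeddings.embed_query("hello")
doc_vectors = embeddings.embed_documents(["hello", "goodbye"])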
get_ipython().run_line_magic('pip', 'install --upgrade --quiet transformers')
from langchain_community.document_loaders import ImageCaptionLoader
list_image_urls = [
"https://upload.wikimedia.org/wikipedia/commons/thumb/5/5a/Hyla_japonica_sep01.jpg/260px-Hyla_japonica_sep01.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/7/71/Tibur%C3%B3n_azul_%28Prionace_glauca%29%2C_canal_Fayal-Pico%2C_islas_Azores%2C_Portugal%2C_2020-07-27%2C_DD_14.jpg/270px-Tibur%C3%B3n_azul_%28Prionace_glauca%29%2C_canal_Fayal-Pico%2C_islas_Azores%2C_Portugal%2C_2020-07-27%2C_DD_14.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/2/21/Thure_de_Thulstrup_-_Battle_of_Shiloh.jpg/251px-Thure_de_Thulstrup_-_Battle_of_Shiloh.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/2/21/Passion_fruits_-_whole_and_halved.jpg/270px-Passion_fruits_-_whole_and_halved.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/5/5e/Messier83_-_Heic1403a.jpg/277px-Messier83_-_Heic1403a.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/b/b6/2022-01-22_Men%27s_World_Cup_at_2021-22_St._Moritz%E2%80%93Celerina_Luge_World_Cup_and_European_Championships_by_Sandro_Halank%E2%80%93257.jpg/288px-2022-01-22_Men%27s_World_Cup_at_2021-22_St._Moritz%E2%80%93Celerina_Luge_World_Cup_and_European_Championships_by_Sandro_Halank%E2%80%93257.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/9/99/Wiesen_Pippau_%28Crepis_biennis%29-20220624-RM-123950.jpg/224px-Wiesen_Pippau_%28Crepis_biennis%29-20220624-RM-123950.jpg",
]
loader = ImageCaptionLoader(path_images=list_image_urls)
list_docs = loader.load()
list_docs
import requests
from PIL import Image
Image.open(requests.get(list_image_urls[0], stream=True).raw).convert("RGB")
from langchain.indexes import VectorstoreIndexCreator
index = VectorstoreIndexCreator()
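# Hedged usage sketch: from_loaders() builds a queryable index over the
# image-caption documents above; the question text is illustrative, and some
# versions may require passing an LLM to query().
index_wrapper = index.from_loaders([loader])
index_wrapper.query("What animals are in the images?")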
from langchain_community.vectorstores import Bagel
texts = ["hello bagel", "hello langchain", "I love salad", "my car", "a dog"]
cluster = Bagel.from_texts(cluster_name="testing", texts=texts)
cluster.similarity_search("bagel", k=3)
cluster.similarity_search_with_score("bagel", k=3)
cluster.delete_cluster()
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)[:10]
cluster = Bagel.from_documents(cluster_name="testing_with_docs", documents=docs)
query = "What did the president say about Ketanji Brown Jackson"
docs = cluster.similarity_search(query)
print(docs[0].page_content[:102])
texts = ["hello bagel", "this is langchain"]
cluster = Bagel.from_texts(cluster_name="testing", texts=texts)
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
template = """Answer the users question based only on the following context:
<context>
{context}
</context>
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI(temperature=0)
search = DuckDuckGoSearchAPIWrapper()
def retriever(query):
return search.run(query)
chain = (
{"context": retriever, "question": RunnablePassthrough()}
| prompt
| model
| StrOutputParser()
)
simple_query = "what is langchain?"
chain.invoke(simple_query)
distracted_query = "man that sam bankman fried trial was crazy! what is langchain?"
chain.invoke(distracted_query)
retriever(distracted_query)
template = """Provide a better search query for \
web search engine to answer the given question, end \
the queries with ’**’. Question: \
{x} Answer:"""
rewrite_prompt = ChatPromptTemplate.from_template(template)
from langchain import hub
rewrite_prompt = hub.pull("langchain-ai/rewrite")
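# Hedged sketch of the rewrite-retrieve-read pattern these prompts set up: a
# rewriter strips the '**' delimiters from the generated query and feeds it to
# the retriever before answering; the _parse helper is illustrative.
def _parse(text):
    return text.strip('"').strip("**")

rewriter = rewrite_prompt | ChatOpenAI(temperature=0) | StrOutputParser() | _parse
rewrite_retrieve_read_chain = (
    {
        "context": {"x": RunnablePassthrough()} | rewriter | retriever,
        "question": RunnablePassthrough(),
    }
    | prompt
    | model
    | StrOutputParser()
)
rewrite_retrieve_read_chain.invoke(distracted_query)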
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results')
import os
from langchain_community.tools.google_trends import GoogleTrendsQueryRun
from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper
os.environ["SERPAPI_API_KEY"] = ""
tool = GoogleTrendsQueryRun(api_wrapper=GoogleTrendsAPIWrapper())
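# Hedged usage sketch: query Google Trends through SerpAPI; "Water" is an
# illustrative search term.
tool.run("Water")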
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-cloud-translate')
from langchain_community.document_transformers import GoogleTranslateTransformer
from langchain_core.documents import Document
sample_text = """[Generated with Google Bard]
Subject: Key Business Process Updates
Date: Friday, 27 October 2023
Dear team,
I am writing to provide an update on some of our key business processes.
Sales process
We have recently implemented a new sales process that is designed to help us close more deals and grow our revenue. The new process includes a more rigorous qualification process, a more streamlined proposal process, and a more effective customer relationship management (CRM) system.
Marketing process
We have also revamped our marketing process to focus on creating more targeted and engaging content. We are also using more social media and paid advertising to reach a wider audience.
Customer service process
We have also made some improvements to our customer service process. We have implemented a new customer support system that makes it easier for customers to get help with their problems. We have also hired more customer support representatives to reduce wait times.
Overall, we are very pleased with the progress we have made on improving our key business processes. We believe that these changes will help us to achieve our goals of growing our business and providing our customers with the best possible experience.
If you have any questions or feedback about any of these changes, please feel free to contact me directly.
Thank you,
Lewis Cymbal
CEO, Cymbal Bank
"""
documents = [Document(page_content=sample_text)]
translator = GoogleTranslateTransformer(project_id="<YOUR_PROJECT_ID>")
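# Hedged usage sketch: target_language_code takes an ISO-639 code; "es" is
# illustrative.
translated_documents = translator.transform_documents(
    documents, target_language_code="es"
)
print(translated_documents[0].page_content[:100])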
from langchain_community.document_loaders import NewsURLLoader
urls = [
"https://www.bbc.com/news/world-us-canada-66388172",
"https://www.bbc.com/news/entertainment-arts-66384971",
]
loader = NewsURLLoader(urls=urls)
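# Hedged usage sketch: fetch and parse the articles into Documents.
data = loader.load()
print(data[0].metadata)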
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wikipedia')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import OpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
tools = [tool]
prompt = hub.pull("hwchase17/react")
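# Hedged sketch (assumed continuation): bind the pulled ReAct prompt to an
# agent over the Wikipedia tool; the input question is illustrative.
llm = OpenAI(temperature=0)
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "What is LangChain?"})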
from langchain_community.document_loaders import HNLoader
loader = HNLoader("https://news.ycombinator.com/item?id=34817881")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pygithub')
import os
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit
from langchain_community.utilities.github import GitHubAPIWrapper
from langchain_openai import ChatOpenAI
os.environ["GITHUB_APP_ID"] = "123456"
os.environ["GITHUB_APP_PRIVATE_KEY"] = "path/to/your/private-key.pem"
os.environ["GITHUB_REPOSITORY"] = "username/repo-name"
os.environ["GITHUB_BRANCH"] = "bot-branch-name"
os.environ["GITHUB_BASE_BRANCH"] = "main"
os.environ["OPENAI_API_KEY"] = ""
llm = ChatOpenAI(temperature=0, model="gpt-4-1106-preview")
github = GitHubAPIWrapper()
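# Hedged sketch (assumed continuation): build the toolkit from the wrapper and
# hand its tools to a structured-chat agent.
toolkit = GitHubToolkit.from_github_api_wrapper(github)
agent = initialize_agent(
    toolkit.get_tools(),
    llm,
    agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)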
from langchain_community.document_loaders import UnstructuredPowerPointLoader
loader = UnstructuredPowerPointLoader("example_data/fake-power-point.pptx")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import os
os.environ["OUTLINE_API_KEY"] = "xxx"
os.environ["OUTLINE_INSTANCE_URL"] = "https://app.getoutline.com"
from langchain.retrievers import OutlineRetriever
retriever = OutlineRetriever()
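# Hedged usage sketch: fetch documents from the Outline instance; the query is
# illustrative.
retriever.get_relevant_documents(query="LangChain")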
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-experimental')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pillow open_clip_torch torch matplotlib')
import open_clip
open_clip.list_pretrained()
import numpy as np
from langchain_experimental.open_clip import OpenCLIPEmbeddings
from PIL import Image
uri_dog = "/Users/rlm/Desktop/test/dog.jpg"
uri_house = "/Users/rlm/Desktop/test/house.jpg"
clip_embd = OpenCLIPEmbeddings(model_name="ViT-g-14", checkpoint="laion2b_s34b_b88k")
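# Hedged usage sketch: embed images and text with the same CLIP model so both
# share one embedding space; the caption strings are illustrative.
img_features = clip_embd.embed_image([uri_dog, uri_house])
text_features = clip_embd.embed_documents(["a photo of a dog", "a photo of a house"])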
get_ipython().system('poetry run pip install dgml-utils==0.3.0 --upgrade --quiet')
import os
from langchain_community.document_loaders import DocugamiLoader
DOCUGAMI_API_KEY = os.environ.get("DOCUGAMI_API_KEY")
docset_id = "26xpy3aes7xp"
document_ids = ["d7jqdzcj50sj", "cgd1eacfkchw"]
loader = DocugamiLoader(docset_id=docset_id, document_ids=document_ids)
chunks = loader.load()
len(chunks)
loader.min_text_length = 64
loader.include_xml_tags = True
chunks = loader.load()
for chunk in chunks[:5]:
print(chunk)
get_ipython().system('poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib')
loader = DocugamiLoader(docset_id="zo954yqy53wp")
chunks = loader.load()
for chunk in chunks:
stripped_metadata = chunk.metadata.copy()
for key in chunk.metadata:
if key not in ["name", "xpath", "id", "structure"]:
del stripped_metadata[key]
chunk.metadata = stripped_metadata
print(len(chunks))
from langchain.chains import RetrievalQA
from langchain_community.vectorstores.chroma import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
embedding = OpenAIEmbeddings()
vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)
retriever = vectordb.as_retriever()
qa_chain = RetrievalQA.from_chain_type(
llm=OpenAI(), chain_type="stuff", retriever=retriever, return_source_documents=True
)
qa_chain("What can tenants do with signage on their properties?")
chain_response = qa_chain("What is rentable area for the property owned by DHA Group?")
chain_response["result"] # correct answer should be 13,500 sq ft
chain_response["source_documents"]
loader = DocugamiLoader(docset_id="zo954yqy53wp")
loader.include_xml_tags = (
True # for additional semantics from the Docugami knowledge graph
)
chunks = loader.load()
print(chunks[0].metadata)
get_ipython().system('poetry run pip install --upgrade lark --quiet')
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_community.vectorstores.chroma import Chroma
EXCLUDE_KEYS = ["id", "xpath", "structure"]
metadata_field_info = [
AttributeInfo(
name=key,
description=f"The {key} for this chunk",
type="string",
)
for key in chunks[0].metadata
if key.lower() not in EXCLUDE_KEYS
]
document_content_description = "Contents of this chunk"
llm = OpenAI(temperature=0)
vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)
retriever = SelfQueryRetriever.from_llm(
llm, vectordb, document_content_description, metadata_field_info, verbose=True
)
qa_chain = RetrievalQA.from_chain_type(
llm=OpenAI(),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
verbose=True,
)
qa_chain(
"What is rentable area for the property owned by DHA Group?"
) # correct answer should be 13,500 sq ft
from typing import Dict, List
from langchain_community.document_loaders import DocugamiLoader
from langchain_core.documents import Document
loader = DocugamiLoader(docset_id="zo954yqy53wp")
loader.include_xml_tags = (
True # for additional semantics from the Docugami knowledge graph
)
loader.parent_hierarchy_levels = 3 # for expanded context
loader.max_text_length = (
1024 * 8
) # 8K chars are roughly 2K tokens (ref: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them)
loader.include_project_metadata_in_doc_metadata = (
False # Not filtering on vector metadata, so remove to lighten the vectors
)
chunks: List[Document] = loader.load()
parents_by_id: Dict[str, Document] = {}
children_by_id: Dict[str, Document] = {}
for chunk in chunks:
chunk_id = chunk.metadata.get("id")
parent_chunk_id = chunk.metadata.get(loader.parent_id_key)
if not parent_chunk_id:
parents_by_id[chunk_id] = chunk
else:
children_by_id[chunk_id] = chunk
for id, chunk in list(children_by_id.items())[:5]:
parent_chunk_id = chunk.metadata.get(loader.parent_id_key)
if parent_chunk_id:
print(f"PARENT CHUNK {parent_chunk_id}: {parents_by_id[parent_chunk_id]}")
print(f"CHUNK {id}: {chunk}")
from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType
from langchain.storage import InMemoryStore
from langchain_community.vectorstores.chroma import Chroma
from langchain_openai import OpenAIEmbeddings
vectorstore = Chroma(collection_name="big2small", embedding_function= | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-pinecone langchain-openai langchain')
from langchain_community.document_loaders import TextLoader
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
from langchain_pinecone import PineconeVectorStore
index_name = "langchain-test-index"
docsearch = PineconeVectorStore.from_documents(docs, embeddings, index_name=index_name)
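# Hedged usage sketch: run a similarity search against the populated index.
query = "What did the president say about Ketanji Brown Jackson"
docs = docsearch.similarity_search(query)
print(docs[0].page_content)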
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wikipedia')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import ChatOpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
import os
os.environ["EXA_API_KEY"] = "..."
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-exa')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_exa import ExaSearchRetriever, TextContentsOptions
from langchain_openai import ChatOpenAI
retriever = ExaSearchRetriever(
k=5, text_contents_options=TextContentsOptions(max_length=200)
)
prompt = PromptTemplate.from_template(
"""Answer the following query based on the following context:
query: {query}
<context>
{context}
</context"""
)
llm = ChatOpenAI()
chain = (
RunnableParallel({"context": retriever, "query": RunnablePassthrough()})
| prompt
| llm
)
chain.invoke("When is the best time to visit japan?")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-exa')
from exa_py import Exa
from langchain.agents import tool
exa = Exa(api_key=os.environ["EXA_API_KEY"])
@tool
def search(query: str):
"""Search for a webpage based on the query."""
return exa.search(f"{query}", use_autoprompt=True, num_results=5)
@tool
def find_similar(url: str):
"""Search for webpages similar to a given URL.
The url passed in should be a URL returned from `search`.
"""
return exa.find_similar(url, num_results=5)
@tool
def get_contents(ids: list[str]):
"""Get the contents of a webpage.
The ids passed in should be a list of ids returned from `search`.
"""
return exa.get_contents(ids)
tools = [search, get_contents, find_similar]
from langchain.agents import AgentExecutor, OpenAIFunctionsAgent
from langchain_core.messages import SystemMessage
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0)
system_message = SystemMessage(
content="You are a web researcher who answers user questions by looking up information on the internet and retrieving contents of helpful documents. Cite your sources."
)
agent_prompt = OpenAIFunctionsAgent.create_prompt(system_message)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=agent_prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.run("Summarize for me a fascinating article about cats.")
from exa_py import Exa
from langchain.agents import tool
exa = Exa(api_key=os.environ["Exa_API_KEY"])
@tool
def search(query: str, include_domains=None, start_published_date=None):
"""Search for a webpage based on the query.
Set the optional include_domains (list[str]) parameter to restrict the search to a list of domains.
Set the optional start_published_date (str) parameter to restrict the search to documents published after the date (YYYY-MM-DD).
"""
return exa.search_and_contents(
f"{query}",
use_autoprompt=True,
num_results=5,
include_domains=include_domains,
start_published_date=start_published_date,
)
@tool
def find_similar(url: str):
"""Search for webpages similar to a given URL.
The url passed in should be a URL returned from `search`.
"""
return exa.find_similar_and_contents(url, num_results=5)
@tool
def get_contents(ids: list[str]):
"""Get the contents of a webpage.
The ids passed in should be a list of ids returned from `search`.
"""
return exa.get_contents(ids)
tools = [search, get_contents, find_similar]
from langchain.agents import AgentExecutor, OpenAIFunctionsAgent
from langchain_core.messages import SystemMessage
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0, model="gpt-4")
system_message = SystemMessage(
content="You are a web researcher who answers user questions by looking up information on the internet and retrieving contents of helpful documents. Cite your sources."
)
agent_prompt = OpenAIFunctionsAgent.create_prompt(system_message)
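# Hedged completion, mirroring the agent assembly earlier in this section.
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=agent_prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.run("Summarize for me a fascinating article about cats.")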
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_core.tools import tool
@tool
def complex_tool(int_arg: int, float_arg: float, dict_arg: dict) -> int:
"""Do something complex with a complex tool."""
return int_arg * float_arg
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
model_with_tools = model.bind_tools(
[complex_tool],
tool_choice="complex_tool",
)
from operator import itemgetter
from langchain.output_parsers import JsonOutputKeyToolsParser
from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough
chain = (
model_with_tools
| JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
| complex_tool
)
chain.invoke(
"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg"
)
from typing import Any
from langchain_core.runnables import RunnableConfig
def try_except_tool(tool_args: dict, config: RunnableConfig) -> Runnable:
try:
return complex_tool.invoke(tool_args, config=config)
except Exception as e:
return f"Calling tool with arguments:\n\n{tool_args}\n\nraised the following error:\n\n{type(e)}: {e}"
chain = (
model_with_tools
| JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
| try_except_tool
)
print(
chain.invoke(
"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg"
)
)
chain = (
model_with_tools
| JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
| complex_tool
)
better_model = ChatOpenAI(model="gpt-4-1106-preview", temperature=0).bind_tools(
[complex_tool], tool_choice="complex_tool"
)
better_chain = (
better_model
| JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
| complex_tool
)
chain_with_fallback = chain.with_fallbacks([better_chain])
chain_with_fallback.invoke(
"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg"
)
import json
from typing import Any
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnablePassthrough
class CustomToolException(Exception):
"""Custom LangChain tool exception."""
def __init__(self, tool_call: dict, exception: Exception) -> None:
super().__init__()
self.tool_call = tool_call
self.exception = exception
def tool_custom_exception(tool_call: dict, config: RunnableConfig) -> Runnable:
try:
return complex_tool.invoke(tool_call["args"], config=config)
except Exception as e:
raise CustomToolException(tool_call, e)
def exception_to_messages(inputs: dict) -> dict:
exception = inputs.pop("exception")
tool_call = {
"type": "function",
"function": {
"name": "complex_tool",
"arguments": json.dumps(exception.tool_call["args"]),
},
"id": exception.tool_call["id"],
}
messages = [
AIMessage(content="", additional_kwargs={"tool_calls": [tool_call]}),
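        # Hedged completion (assumed, following the common tool-retry pattern):
        # report the failed call and its error back to the model so it can
        # retry with corrected arguments.
        ToolMessage(tool_call_id=tool_call["id"], content=str(exception.exception)),
        HumanMessage(
            content="The last tool call raised an exception. Try calling the tool again with corrected arguments."
        ),
    ]
    return {"messages": messages}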
from langchain_community.document_loaders import RoamLoader
loader = RoamLoader("Roam_DB")
from getpass import getpass
KAY_API_KEY = getpass()
OPENAI_API_KEY = getpass()
import os
os.environ["KAY_API_KEY"] = KAY_API_KEY
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
from langchain.chains import ConversationalRetrievalChain
from langchain.retrievers import KayAiRetriever
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model_name="gpt-3.5-turbo")
retriever = KayAiRetriever.create(
dataset_id="company", data_types=["10-K", "10-Q"], num_contexts=6
)
qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)
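# Hedged usage sketch: ask a question over the indexed SEC filings; the
# question text is illustrative.
chat_history = []
question = "What are patterns in Nvidia's spend over the past three quarters?"
result = qa({"question": question, "chat_history": chat_history})
print(result["answer"])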
import uuid
from pathlib import Path
import langchain
import torch
from bs4 import BeautifulSoup as Soup
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryByteStore, LocalFileStore
from langchain_community.document_loaders.recursive_url_loader import (
RecursiveUrlLoader,
)
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import RecursiveCharacterTextSplitter # noqa
DOCSTORE_DIR = "."
DOCSTORE_ID_KEY = "doc_id"
loader = RecursiveUrlLoader(
"https://ar5iv.labs.arxiv.org/html/1706.03762",
max_depth=2,
extractor=lambda x: Soup(x, "html.parser").text,
)
data = loader.load()
print(f"Loaded {len(data)} documents")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
print(f"Split into {len(all_splits)} documents")
from langchain_community.embeddings import QuantizedBiEncoderEmbeddings
from langchain_core.embeddings import Embeddings
model_name = "Intel/bge-small-en-v1.5-rag-int8-static"
encode_kwargs = {"normalize_embeddings": True} # set True to compute cosine similarity
model_inc = QuantizedBiEncoderEmbeddings(
model_name=model_name,
encode_kwargs=encode_kwargs,
query_instruction="Represent this sentence for searching relevant passages: ",
)
def get_multi_vector_retriever(
docstore_id_key: str, collection_name: str, embedding_function: Embeddings
):
"""Create the composed retriever object."""
vectorstore = Chroma(
collection_name=collection_name,
embedding_function=embedding_function,
)
store = InMemoryByteStore()
return MultiVectorRetriever(
vectorstore=vectorstore,
byte_store=store,
id_key=docstore_id_key,
)
retriever = get_multi_vector_retriever(DOCSTORE_ID_KEY, "multi_vec_store", model_inc)
child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
id_key = "doc_id"
doc_ids = [str(uuid.uuid4()) for _ in all_splits]
sub_docs = []
for i, doc in enumerate(all_splits):
_id = doc_ids[i]
_sub_docs = child_text_splitter.split_documents([doc])
for _doc in _sub_docs:
_doc.metadata[id_key] = _id
sub_docs.extend(_sub_docs)
retriever.vectorstore.add_documents(sub_docs)
retriever.docstore.mset(list(zip(doc_ids, all_splits)))
import torch
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "Intel/neural-chat-7b-v3-3"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
model_id, device_map="auto", torch_dtype=torch.bfloat16
)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=100)
hf = HuggingFacePipeline(pipeline=pipe)
from langchain import hub
prompt = hub.pull("rlm/rag-prompt")
from langchain.schema.runnable import RunnablePassthrough
rag_chain = {"context": retriever, "question": | RunnablePassthrough() | langchain.schema.runnable.RunnablePassthrough |
import logging
from langchain.retrievers import RePhraseQueryRetriever
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
logging.basicConfig()
logging.getLogger("langchain.retrievers.re_phraser").setLevel(logging.INFO)
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
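# Hedged continuation sketch: index the post in Chroma, then wrap the
# retriever so queries are re-phrased by the LLM before retrieval.
all_splits = text_splitter.split_documents(data)
vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())
llm = ChatOpenAI(temperature=0)
retriever_from_llm = RePhraseQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(), llm=llm
)
docs = retriever_from_llm.get_relevant_documents(
    "Hi I'm Lance. What are the approaches to Task Decomposition?"
)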
from langchain.agents import Tool
from langchain_experimental.utilities import PythonREPL
python_repl = PythonREPL()
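# Hedged sketch: expose the REPL as a tool; run() executes a command and
# returns whatever it prints.
repl_tool = Tool(
    name="python_repl",
    description="A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with `print(...)`.",
    func=python_repl.run,
)
python_repl.run("print(1 + 1)")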
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
documents = TextLoader("../../state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever()
docs = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson"
)
pretty_print_docs(docs)
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
compressor = LLMChainExtractor.from_llm(llm)
compression_retriever = ContextualCompressionRetriever(
base_compressor=compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
"What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)
from langchain.retrievers.document_compressors import LLMChainFilter
_filter = LLMChainFilter.from_llm(llm)
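# Hedged usage sketch, mirroring the extractor setup above: the filter drops
# irrelevant documents instead of compressing their contents.
compression_retriever = ContextualCompressionRetriever(
    base_compressor=_filter, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
    "What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)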
from langchain_community.document_loaders import OBSDirectoryLoader
endpoint = "your-endpoint"
config = {"ak": "your-access-key", "sk": "your-secret-key"}
loader = OBSDirectoryLoader("your-bucket-name", endpoint=endpoint, config=config)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic')
import os
import boto3
comprehend_client = boto3.client("comprehend", region_name="us-east-1")
from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
comprehend_moderation = AmazonComprehendModerationChain(
client=comprehend_client,
verbose=True, # optional
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comprehend_moderation
| {"input": (lambda x: x["output"]) | llm}
| comprehend_moderation
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"
}
)
except ModerationPiiError as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import (
BaseModerationConfig,
ModerationPiiConfig,
ModerationPromptSafetyConfig,
ModerationToxicityConfig,
)
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5)
moderation_config = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)
comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
verbose=True,
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
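# Hedged sketch, mirroring the chain above but with the configured moderation
# instance on both sides of the LLM.
chain = (
    prompt
    | comp_moderation_with_config
    | {"input": (lambda x: x["output"]) | llm}
    | comp_moderation_with_config
)
response = chain.invoke(
    {"question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"}
)
print(response["output"])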
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
llm = OpenAI(temperature=0)
from pathlib import Path
relevant_parts = []
for p in Path(".").absolute().parts:
relevant_parts.append(p)
if relevant_parts[-3:] == ["langchain", "docs", "modules"]:
break
doc_path = str(Path(*relevant_parts) / "state_of_the_union.txt")
from langchain_community.document_loaders import TextLoader
loader = TextLoader(doc_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings, collection_name="state-of-union")
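# Hedged continuation sketch: expose the vectorstore through a RetrievalQA
# chain, as in the Docugami example earlier in this section.
state_of_union = RetrievalQA.from_chain_type(
    llm=llm, chain_type="stuff", retriever=docsearch.as_retriever()
)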
get_ipython().run_line_magic('pip', 'install -U --quiet langchain langchain_community openai chromadb langchain-experimental')
get_ipython().run_line_magic('pip', 'install --quiet "unstructured[all-docs]" pypdf pillow pydantic lxml pillow matplotlib chromadb tiktoken')
import logging
import zipfile
import requests
logging.basicConfig(level=logging.INFO)
data_url = "https://storage.googleapis.com/benchmarks-artifacts/langchain-docs-benchmarking/cj.zip"
result = requests.get(data_url)
filename = "cj.zip"
with open(filename, "wb") as file:
file.write(result.content)
with zipfile.ZipFile(filename, "r") as zip_ref:
zip_ref.extractall()
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader("./cj/cj.pdf")
docs = loader.load()
tables = []
texts = [d.page_content for d in docs]
len(texts)
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatVertexAI
from langchain_community.llms import VertexAI
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda
def generate_text_summaries(texts, tables, summarize_texts=False):
"""
Summarize text elements
texts: List of str
tables: List of str
summarize_texts: Bool to summarize texts
"""
prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
These summaries will be embedded and used to retrieve the raw text or table elements. \
Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
prompt = PromptTemplate.from_template(prompt_text)
empty_response = RunnableLambda(
lambda x: AIMessage(content="Error processing document")
)
model = VertexAI(
temperature=0, model_name="gemini-pro", max_output_tokens=1024
).with_fallbacks([empty_response])
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
text_summaries = []
table_summaries = []
if texts and summarize_texts:
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 1})
elif texts:
text_summaries = texts
if tables:
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 1})
return text_summaries, table_summaries
text_summaries, table_summaries = generate_text_summaries(
texts, tables, summarize_texts=True
)
len(text_summaries)
import base64
import os
from langchain_core.messages import HumanMessage
def encode_image(image_path):
"""Getting the base64 string"""
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
def image_summarize(img_base64, prompt):
"""Make image summary"""
model = ChatVertexAI(model_name="gemini-pro-vision", max_output_tokens=1024)
msg = model(
[
HumanMessage(
content=[
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
]
)
]
)
return msg.content
def generate_img_summaries(path):
"""
Generate summaries and base64 encoded strings for images
path: Path to list of .jpg files extracted by Unstructured
"""
img_base64_list = []
image_summaries = []
prompt = """You are an assistant tasked with summarizing images for retrieval. \
These summaries will be embedded and used to retrieve the raw image. \
Give a concise summary of the image that is well optimized for retrieval."""
for img_file in sorted(os.listdir(path)):
if img_file.endswith(".jpg"):
img_path = os.path.join(path, img_file)
base64_image = encode_image(img_path)
img_base64_list.append(base64_image)
image_summaries.append(image_summarize(base64_image, prompt))
return img_base64_list, image_summaries
img_base64_list, image_summaries = generate_img_summaries("./cj")
len(image_summaries)
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.embeddings import VertexAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
def create_multi_vector_retriever(
vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images
):
"""
Create retriever that indexes summaries, but returns raw images or texts
"""
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
def add_documents(retriever, doc_summaries, doc_contents):
doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
summary_docs = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(doc_summaries)
]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, doc_contents)))
if text_summaries:
add_documents(retriever, text_summaries, texts)
if table_summaries:
add_documents(retriever, table_summaries, tables)
if image_summaries:
add_documents(retriever, image_summaries, images)
return retriever
vectorstore = Chroma(
collection_name="mm_rag_cj_blog",
embedding_function=VertexAIEmbeddings(model_name="textembedding-gecko@latest"),
)
retriever_multi_vector_img = create_multi_vector_retriever(
vectorstore,
text_summaries,
texts,
table_summaries,
tables,
image_summaries,
img_base64_list,
)
import io
import re
from IPython.display import HTML, display
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from PIL import Image
def plt_img_base64(img_base64):
"""Disply base64 encoded string as image"""
image_html = f'<img src="data:image/jpeg;base64,{img_base64}" />'
display(HTML(image_html))
def looks_like_base64(sb):
"""Check if the string looks like base64"""
return re.match("^[A-Za-z0-9+/]+[=]{0,2}$", sb) is not None
def is_image_data(b64data):
"""
Check if the base64 data is an image by looking at the start of the data
"""
image_signatures = {
b"\xFF\xD8\xFF": "jpg",
b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A": "png",
b"\x47\x49\x46\x38": "gif",
b"\x52\x49\x46\x46": "webp",
}
try:
header = base64.b64decode(b64data)[:8] # Decode and get the first 8 bytes
for sig, format in image_signatures.items():
if header.startswith(sig):
return True
return False
except Exception:
return False
def resize_base64_image(base64_string, size=(128, 128)):
"""
Resize an image encoded as a Base64 string
"""
img_data = base64.b64decode(base64_string)
img = Image.open(io.BytesIO(img_data))
resized_img = img.resize(size, Image.LANCZOS)
buffered = io.BytesIO()
resized_img.save(buffered, format=img.format)
return base64.b64encode(buffered.getvalue()).decode("utf-8")
def split_image_text_types(docs):
"""
Split base64-encoded images and texts
"""
b64_images = []
texts = []
for doc in docs:
if isinstance(doc, Document):
doc = doc.page_content
if looks_like_base64(doc) and is_image_data(doc):
doc = resize_base64_image(doc, size=(1300, 600))
b64_images.append(doc)
else:
texts.append(doc)
if len(b64_images) > 0:
return {"images": b64_images[:1], "texts": []}
return {"images": b64_images, "texts": texts}
def img_prompt_func(data_dict):
"""
Join the context into a single string
"""
formatted_texts = "\n".join(data_dict["context"]["texts"])
messages = []
text_message = {
"type": "text",
"text": (
"You are financial analyst tasking with providing investment advice.\n"
"You will be given a mixed of text, tables, and image(s) usually of charts or graphs.\n"
"Use this information to provide investment advice related to the user question. \n"
f"User-provided question: {data_dict['question']}\n\n"
"Text and / or tables:\n"
f"{formatted_texts}"
),
}
messages.append(text_message)
if data_dict["context"]["images"]:
for image in data_dict["context"]["images"]:
image_message = {
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image}"},
}
messages.append(image_message)
return [HumanMessage(content=messages)]
def multi_modal_rag_chain(retriever):
"""
Multi-modal RAG chain
"""
model = ChatVertexAI(
temperature=0, model_name="gemini-pro-vision", max_output_tokens=1024
)
chain = (
{
"context": retriever | | RunnableLambda(split_image_text_types) | langchain_core.runnables.RunnableLambda |
import asyncio
import os
import nest_asyncio
import pandas as pd
from langchain.docstore.document import Document
from langchain_community.agent_toolkits.pandas.base import create_pandas_dataframe_agent
from langchain_experimental.autonomous_agents import AutoGPT
from langchain_openai import ChatOpenAI
nest_asyncio.apply()
llm = ChatOpenAI(model_name="gpt-4", temperature=1.0)
import os
from contextlib import contextmanager
from typing import Optional
from langchain.agents import tool
from langchain_community.tools.file_management.read import ReadFileTool
from langchain_community.tools.file_management.write import WriteFileTool
ROOT_DIR = "./data/"
@contextmanager
def pushd(new_dir):
"""Context manager for changing the current working directory."""
prev_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(prev_dir)
@tool
def process_csv(
csv_file_path: str, instructions: str, output_path: Optional[str] = None
) -> str:
"""Process a CSV by with pandas in a limited REPL.\
Only use this after writing data to disk as a csv file.\
Any figures must be saved to disk to be viewed by the human.\
Instructions should be written in natural language, not code. Assume the dataframe is already loaded."""
with pushd(ROOT_DIR):
try:
df = pd.read_csv(csv_file_path)
except Exception as e:
return f"Error: {e}"
agent = create_pandas_dataframe_agent(llm, df, max_iterations=30, verbose=True)
if output_path is not None:
instructions += f" Save output to disk at {output_path}"
try:
result = agent.run(instructions)
return result
except Exception as e:
return f"Error: {e}"
async def async_load_playwright(url: str) -> str:
"""Load the specified URLs using Playwright and parse using BeautifulSoup."""
from bs4 import BeautifulSoup
from playwright.async_api import async_playwright
results = ""
async with async_playwright() as p:
browser = await p.chromium.launch(headless=True)
try:
page = await browser.new_page()
await page.goto(url)
page_source = await page.content()
soup = BeautifulSoup(page_source, "html.parser")
for script in soup(["script", "style"]):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
results = "\n".join(chunk for chunk in chunks if chunk)
except Exception as e:
results = f"Error: {e}"
await browser.close()
return results
def run_async(coro):
event_loop = asyncio.get_event_loop()
return event_loop.run_until_complete(coro)
@tool
def browse_web_page(url: str) -> str:
"""Verbose way to scrape a whole webpage. Likely to cause issues parsing."""
return run_async(async_load_playwright(url))
from langchain.chains.qa_with_sources.loading import (
BaseCombineDocumentsChain,
load_qa_with_sources_chain,
)
from langchain.tools import BaseTool, DuckDuckGoSearchRun
from langchain_text_splitters import RecursiveCharacterTextSplitter
from pydantic import Field
def _get_text_splitter():
return RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=20,
length_function=len,
)
class WebpageQATool(BaseTool):
name = "query_webpage"
description = (
"Browse a webpage and retrieve the information relevant to the question."
)
text_splitter: RecursiveCharacterTextSplitter = Field(
default_factory=_get_text_splitter
)
qa_chain: BaseCombineDocumentsChain
def _run(self, url: str, question: str) -> str:
"""Useful for browsing websites and scraping the text information."""
result = browse_web_page.run(url)
docs = [Document(page_content=result, metadata={"source": url})]
web_docs = self.text_splitter.split_documents(docs)
results = []
for i in range(0, len(web_docs), 4):
input_docs = web_docs[i : i + 4]
window_result = self.qa_chain(
{"input_documents": input_docs, "question": question},
return_only_outputs=True,
)
results.append(f"Response from window {i} - {window_result}")
results_docs = [
Document(page_content="\n".join(results), metadata={"source": url})
]
return self.qa_chain(
{"input_documents": results_docs, "question": question},
return_only_outputs=True,
)
async def _arun(self, url: str, question: str) -> str:
raise NotImplementedError
query_website_tool = WebpageQATool(qa_chain=load_qa_with_sources_chain(llm))
import faiss
from langchain.docstore import InMemoryDocstore
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
embeddings_model = OpenAIEmbeddings()
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
web_search = DuckDuckGoSearchRun()
tools = [
web_search,
WriteFileTool(root_dir="./data"),
ReadFileTool(root_dir="./data"),
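    # Hedged completion (assumed): round out the toolkit with the CSV and
    # web-QA tools defined above.
    process_csv,
    query_website_tool,
]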
get_ipython().run_cell_magic('writefile', 'discord_chats.txt', "talkingtower — 08/15/2023 11:10 AM\nLove music! Do you like jazz?\nreporterbob — 08/15/2023 9:27 PM\nYes! Jazz is fantastic. Ever heard this one?\nWebsite\nListen to classic jazz track...\n\ntalkingtower — Yesterday at 5:03 AM\nIndeed! Great choice. 🎷\nreporterbob — Yesterday at 5:23 AM\nThanks! How about some virtual sightseeing?\nWebsite\nVirtual tour of famous landmarks...\n\ntalkingtower — Today at 2:38 PM\nSounds fun! Let's explore.\nreporterbob — Today at 2:56 PM\nEnjoy the tour! See you around.\ntalkingtower — Today at 3:00 PM\nThank you! Goodbye! 👋\nreporterbob — Today at 3:02 PM\nFarewell! Happy exploring.\n")
import logging
import re
from typing import Iterator, List
from langchain_community.chat_loaders import base as chat_loaders
from langchain_core.messages import BaseMessage, HumanMessage
logger = logging.getLogger()
class DiscordChatLoader(chat_loaders.BaseChatLoader):
def __init__(self, path: str):
"""
Initialize the Discord chat loader.
Args:
path: Path to the exported Discord chat text file.
"""
self.path = path
self._message_line_regex = re.compile(
r"(.+?) — (\w{3,9} \d{1,2}(?:st|nd|rd|th)?(?:, \d{4})? \d{1,2}:\d{2} (?:AM|PM)|Today at \d{1,2}:\d{2} (?:AM|PM)|Yesterday at \d{1,2}:\d{2} (?:AM|PM))", # noqa
flags=re.DOTALL,
)
def _load_single_chat_session_from_txt(
self, file_path: str
) -> chat_loaders.ChatSession:
"""
Load a single chat session from a text file.
Args:
file_path: Path to the text file containing the chat messages.
Returns:
A `ChatSession` object containing the loaded chat messages.
"""
with open(file_path, "r", encoding="utf-8") as file:
lines = file.readlines()
results: List[BaseMessage] = []
current_sender = None
current_timestamp = None
current_content = []
for line in lines:
if re.match(
r".+? — (\d{2}/\d{2}/\d{4} \d{1,2}:\d{2} (?:AM|PM)|Today at \d{1,2}:\d{2} (?:AM|PM)|Yesterday at \d{1,2}:\d{2} (?:AM|PM))", # noqa
line,
):
if current_sender and current_content:
results.append(
HumanMessage(
content="".join(current_content).strip(),
additional_kwargs={
"sender": current_sender,
"events": [{"message_time": current_timestamp}],
},
)
)
current_sender, current_timestamp = line.split(" — ")[:2]
current_content = [
line[len(current_sender) + len(current_timestamp) + 4 :].strip()
]
elif re.match(r"\[\d{1,2}:\d{2} (?:AM|PM)\]", line.strip()):
results.append(
HumanMessage(
content="".join(current_content).strip(),
additional_kwargs={
"sender": current_sender,
"events": [{"message_time": current_timestamp}],
},
)
)
current_timestamp = line.strip()[1:-1]
current_content = []
else:
current_content.append("\n" + line.strip())
if current_sender and current_content:
results.append(
HumanMessage(
content="".join(current_content).strip(),
additional_kwargs={
"sender": current_sender,
"events": [{"message_time": current_timestamp}],
},
)
)
return chat_loaders.ChatSession(messages=results)
def lazy_load(self) -> Iterator[chat_loaders.ChatSession]:
"""
Lazy load the messages from the chat file and yield them in the required format.
Yields:
A `ChatSession` object containing the loaded chat messages.
"""
yield self._load_single_chat_session_from_txt(self.path)
loader = DiscordChatLoader(
path="./discord_chats.txt",
)
from typing import List
from langchain_community.chat_loaders.base import ChatSession
from langchain_community.chat_loaders.utils import (
map_ai_messages,
merge_chat_runs,
)
raw_messages = loader.lazy_load()
merged_messages = | merge_chat_runs(raw_messages) | langchain_community.chat_loaders.utils.merge_chat_runs |
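# Minimal continuation sketch: mark one participant as the AI speaker so the
# transcript maps onto human/AI chat messages ("talkingtower" is an assumption).
messages: List[ChatSession] = list(
    map_ai_messages(merged_messages, sender="talkingtower")
)
messages[0]["messages"][:3]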
get_ipython().run_line_magic('pip', 'install --upgrade --quiet html2text')
from langchain_community.document_loaders import AsyncHtmlLoader
urls = ["https://www.espn.com", "https://lilianweng.github.io/posts/2023-06-23-agent/"]
loader = AsyncHtmlLoader(urls)
docs = loader.load()
from langchain_community.document_transformers import Html2TextTransformer
urls = ["https://www.espn.com", "https://lilianweng.github.io/posts/2023-06-23-agent/"]
html2text = | Html2TextTransformer() | langchain_community.document_transformers.Html2TextTransformer |
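# Apply the transformer to the pages loaded above and preview the plain text.
docs_transformed = html2text.transform_documents(docs)
docs_transformed[0].page_content[:500]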
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('poetry run pip install replicate')
from getpass import getpass
REPLICATE_API_TOKEN = getpass()
import os
os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Replicate
llm = Replicate(
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
model_kwargs={"temperature": 0.75, "max_length": 500, "top_p": 1},
)
prompt = """
User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?
Assistant:
"""
llm(prompt)
llm = Replicate(
model="replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5"
)
prompt = """
Answer the following yes/no question by reasoning step by step.
Can a dog drive a car?
"""
llm(prompt)
text2image = Replicate(
model="stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf",
model_kwargs={"image_dimensions": "512x512"},
)
image_output = text2image("A cat riding a motorcycle by Picasso")
image_output
get_ipython().system('poetry run pip install Pillow')
from io import BytesIO
import requests
from PIL import Image
response = requests.get(image_output)
img = Image.open(BytesIO(response.content))
img
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
llm = Replicate(
streaming=True,
callbacks=[StreamingStdOutCallbackHandler()],
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
model_kwargs={"temperature": 0.75, "max_length": 500, "top_p": 1},
)
prompt = """
User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?
Assistant:
"""
_ = llm(prompt)
import time
llm = Replicate(
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
model_kwargs={"temperature": 0.01, "max_length": 500, "top_p": 1},
)
prompt = """
User: What is the best way to learn python?
Assistant:
"""
start_time = time.perf_counter()
raw_output = llm(prompt) # raw output, no stop
end_time = time.perf_counter()
print(f"Raw output:\n {raw_output}")
print(f"Raw output runtime: {end_time - start_time} seconds")
start_time = time.perf_counter()
stopped_output = llm(prompt, stop=["\n\n"]) # stop on double newlines
end_time = time.perf_counter()
print(f"Stopped output:\n {stopped_output}")
print(f"Stopped output runtime: {end_time - start_time} seconds")
from langchain.chains import SimpleSequentialChain
dolly_llm = Replicate(
model="replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5"
)
text2image = | Replicate(
model="stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf"
) | langchain_community.llms.Replicate |
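# Hedged sketch of the full pipeline: name a company, describe its logo, then
# render the description. Prompt texts are illustrative assumptions.
prompt = PromptTemplate.from_template(
    "What is a good name for a company that makes {product}?"
)
chain_one = LLMChain(llm=dolly_llm, prompt=prompt)
second_prompt = PromptTemplate.from_template(
    "Write a description of a logo for this company: {company_name}"
)
chain_two = LLMChain(llm=dolly_llm, prompt=second_prompt)
third_prompt = PromptTemplate.from_template("{description}")
chain_three = LLMChain(llm=text2image, prompt=third_prompt)
overall_chain = SimpleSequentialChain(
    chains=[chain_one, chain_two, chain_three], verbose=True
)
overall_chain.run("colorful socks")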
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"}
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-datastore')
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
from google.colab import auth
auth.authenticate_user()
get_ipython().system('gcloud services enable datastore.googleapis.com')
from langchain_core.documents import Document
from langchain_google_datastore import DatastoreSaver
data = [Document(page_content="Hello, World!")]
saver = DatastoreSaver()
saver.upsert_documents(data)
saver = DatastoreSaver("Collection")
saver.upsert_documents(data)
doc_ids = ["AnotherCollection/doc_id", "foo/bar"]
saver = DatastoreSaver()
saver.upsert_documents(documents=data, document_ids=doc_ids)
from langchain_google_datastore import DatastoreLoader
loader_collection = | DatastoreLoader("Collection") | langchain_google_datastore.DatastoreLoader |
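# Read the documents back out of the "Collection" kind (minimal usage sketch).
docs = loader_collection.load()
print(docs[0].page_content)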
get_ipython().run_line_magic('pip', 'install --upgrade --quiet predictionguard langchain')
import os
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import PredictionGuard
os.environ["OPENAI_API_KEY"] = "<your OpenAI api key>"
os.environ["PREDICTIONGUARD_TOKEN"] = "<your Prediction Guard access token>"
pgllm = PredictionGuard(model="OpenAI-text-davinci-003")
pgllm("Tell me a joke")
template = """Respond to the following query based on the context.
Context: EVERY comment, DM + email suggestion has led us to this EXCITING announcement! 🎉 We have officially added TWO new candle subscription box options! 📦
Exclusive Candle Box - $80
Monthly Candle Box - $45 (NEW!)
Scent of The Month Box - $28 (NEW!)
Head to stories to get ALLL the deets on each box! 👆 BONUS: Save 50% on your first box with code 50OFF! 🎉
Query: {query}
Result: """
prompt = PromptTemplate.from_template(template)
pgllm(prompt.format(query="What kind of post is this?"))
pgllm = PredictionGuard(
model="OpenAI-text-davinci-003",
output={
"type": "categorical",
"categories": ["product announcement", "apology", "relational"],
},
)
pgllm(prompt.format(query="What kind of post is this?"))
pgllm = PredictionGuard(model="OpenAI-text-davinci-003")
template = """Question: {question}
Answer: Let's think step by step."""
prompt = | PromptTemplate.from_template(template) | langchain.prompts.PromptTemplate.from_template |
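# Standard chain usage: chain-of-thought Q&A through Prediction Guard.
llm_chain = LLMChain(prompt=prompt, llm=pgllm)
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
llm_chain.predict(question=question)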
import os
from langchain.agents import AgentExecutor, AgentType, initialize_agent, load_tools
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain_community.llms import GradientLLM
from getpass import getpass
if not os.environ.get("GRADIENT_ACCESS_TOKEN", None):
os.environ["GRADIENT_ACCESS_TOKEN"] = getpass("gradient.ai access token:")
if not os.environ.get("GRADIENT_WORKSPACE_ID", None):
os.environ["GRADIENT_WORKSPACE_ID"] = getpass("gradient.ai workspace id:")
if not os.environ.get("GRADIENT_MODEL_ADAPTER_ID", None):
os.environ["GRADIENT_MODEL_ID"] = getpass("gradient.ai model id:")
llm = GradientLLM(
model_id=os.environ["GRADIENT_MODEL_ID"],
)
tools = | load_tools(["memorize"], llm=llm) | langchain.agents.load_tools |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet predictionguard langchain')
import os
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import PredictionGuard
os.environ["OPENAI_API_KEY"] = "<your OpenAI api key>"
os.environ["PREDICTIONGUARD_TOKEN"] = "<your Prediction Guard access token>"
pgllm = PredictionGuard(model="OpenAI-text-davinci-003")
pgllm("Tell me a joke")
template = """Respond to the following query based on the context.
Context: EVERY comment, DM + email suggestion has led us to this EXCITING announcement! 🎉 We have officially added TWO new candle subscription box options! 📦
Exclusive Candle Box - $80
Monthly Candle Box - $45 (NEW!)
Scent of The Month Box - $28 (NEW!)
Head to stories to get ALLL the deets on each box! 👆 BONUS: Save 50% on your first box with code 50OFF! 🎉
Query: {query}
Result: """
prompt = | PromptTemplate.from_template(template) | langchain.prompts.PromptTemplate.from_template |
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
documents = TextLoader("../../state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever()
docs = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson"
)
pretty_print_docs(docs)
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
compressor = | LLMChainExtractor.from_llm(llm) | langchain.retrievers.document_compressors.LLMChainExtractor.from_llm |
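# Wrap the base retriever with the extractor so only the query-relevant
# snippets of each document come back.
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
    "What did the president say about Ketanji Brown Jackson"
)
pretty_print_docs(compressed_docs)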
get_ipython().run_line_magic('pip', 'install --upgrade --quiet yfinance')
import os
os.environ["OPENAI_API_KEY"] = "..."
from langchain.agents import AgentType, initialize_agent
from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.0)
tools = [ | YahooFinanceNewsTool() | langchain_community.tools.yahoo_finance_news.YahooFinanceNewsTool |
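# Finish the agent wiring and ask a finance question (docs-style usage).
agent_chain = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent_chain.run("What happened today with Microsoft stocks?")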
get_ipython().run_line_magic('pip', 'install --upgrade --quiet annoy')
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Annoy
embeddings_func = HuggingFaceEmbeddings()
texts = ["pizza is great", "I love salad", "my car", "a dog"]
vector_store = Annoy.from_texts(texts, embeddings_func)
vector_store_v2 = Annoy.from_texts(
texts, embeddings_func, metric="dot", n_trees=100, n_jobs=1
)
vector_store.similarity_search("food", k=3)
vector_store.similarity_search_with_score("food", k=3)
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txtn.txtn.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
docs[:5]
vector_store_from_docs = Annoy.from_documents(docs, embeddings_func)
query = "What did the president say about Ketanji Brown Jackson"
docs = vector_store_from_docs.similarity_search(query)
print(docs[0].page_content[:100])
embs = embeddings_func.embed_documents(texts)
data = list(zip(texts, embs))
vector_store_from_embeddings = | Annoy.from_embeddings(data, embeddings_func) | langchain_community.vectorstores.Annoy.from_embeddings |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet rank_bm25')
from langchain.retrievers import BM25Retriever
retriever = | BM25Retriever.from_texts(["foo", "bar", "world", "hello", "foo bar"]) | langchain.retrievers.BM25Retriever.from_texts |
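# Minimal retrieval example over the in-memory corpus.
result = retriever.get_relevant_documents("foo")
result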
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
llm = OpenAI(temperature=0)
from pathlib import Path
relevant_parts = []
for p in Path(".").absolute().parts:
relevant_parts.append(p)
if relevant_parts[-3:] == ["langchain", "docs", "modules"]:
break
doc_path = str(Path(*relevant_parts) / "state_of_the_union.txt")
from langchain_community.document_loaders import TextLoader
loader = TextLoader(doc_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings, collection_name="state-of-union")
state_of_union = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=docsearch.as_retriever()
)
from langchain_community.document_loaders import WebBaseLoader
loader = WebBaseLoader("https://beta.ruff.rs/docs/faq/")
docs = loader.load()
ruff_texts = text_splitter.split_documents(docs)
ruff_db = | Chroma.from_documents(ruff_texts, embeddings, collection_name="ruff") | langchain_community.vectorstores.Chroma.from_documents |
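# Mirror the state-of-the-union chain for the Ruff docs (same pattern as above).
ruff = RetrievalQA.from_chain_type(
    llm=llm, chain_type="stuff", retriever=ruff_db.as_retriever()
)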
get_ipython().system('pip install databricks-sql-connector')
from langchain_community.utilities import SQLDatabase
db = SQLDatabase.from_databricks(catalog="samples", schema="nyctaxi")
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0, model_name="gpt-4")
from langchain_experimental.sql import SQLDatabaseChain
db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)
db_chain.run(
"What is the average duration of taxi rides that start between midnight and 6am?"
)
from langchain.agents import create_sql_agent
from langchain_community.agent_toolkits import SQLDatabaseToolkit
toolkit = | SQLDatabaseToolkit(db=db, llm=llm) | langchain_community.agent_toolkits.SQLDatabaseToolkit |
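# Sketch: build the SQL agent from the toolkit and ask the same question
# agent-style (the question text is illustrative).
agent = create_sql_agent(llm=llm, toolkit=toolkit, verbose=True)
agent.run("What is the longest trip distance and how long did it take?")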
from langchain.evaluation import RegexMatchStringEvaluator
evaluator = | RegexMatchStringEvaluator() | langchain.evaluation.RegexMatchStringEvaluator |
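# Minimal usage: the reference is the regex the prediction must match.
evaluator.evaluate_strings(
    prediction="The delivery will be made on 2024-01-05",
    reference=r".*\b\d{4}-\d{2}-\d{2}\b.*",
)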
get_ipython().run_line_magic('pip', 'install -qU esprima esprima tree_sitter tree_sitter_languages')
import warnings
warnings.filterwarnings("ignore")
from pprint import pprint
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers import LanguageParser
from langchain_text_splitters import Language
loader = GenericLoader.from_filesystem(
"./example_data/source_code",
glob="*",
suffixes=[".py", ".js"],
parser=LanguageParser(),
)
docs = loader.load()
len(docs)
for document in docs:
pprint(document.metadata)
print("\n\n--8<--\n\n".join([document.page_content for document in docs]))
loader = GenericLoader.from_filesystem(
"./example_data/source_code",
glob="*",
suffixes=[".py"],
parser= | LanguageParser(language=Language.PYTHON, parser_threshold=1000) | langchain_community.document_loaders.parsers.LanguageParser |
from getpass import getpass
from langchain_community.document_loaders.larksuite import LarkSuiteDocLoader
DOMAIN = input("larksuite domain")
ACCESS_TOKEN = getpass("larksuite tenant_access_token or user_access_token")
DOCUMENT_ID = input("larksuite document id")
from pprint import pprint
larksuite_loader = LarkSuiteDocLoader(DOMAIN, ACCESS_TOKEN, DOCUMENT_ID)
docs = larksuite_loader.load()
pprint(docs)
from langchain.chains.summarize import load_summarize_chain
from langchain_community.llms.fake import FakeListLLM
llm = | FakeListLLM(responses=["This is a fake summary of the document."]) | langchain_community.llms.fake.FakeListLLM
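# Summarize the LarkSuite document; FakeListLLM requires a responses list and
# returns the canned string above, which keeps this example offline.
chain = load_summarize_chain(llm, chain_type="map_reduce")
chain.run(docs)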
get_ipython().run_line_magic('pip', 'install --upgrade --quiet cos-python-sdk-v5')
from langchain_community.document_loaders import TencentCOSFileLoader
from qcloud_cos import CosConfig
conf = CosConfig(
Region="your cos region",
SecretId="your cos secret_id",
SecretKey="your cos secret_key",
)
loader = | TencentCOSFileLoader(conf=conf, bucket="you_cos_bucket", key="fake.docx") | langchain_community.document_loaders.TencentCOSFileLoader |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet marqo')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Marqo
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = | CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | langchain_text_splitters.CharacterTextSplitter |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet elasticsearch==7.11.0')
import getpass
import os
os.environ["QIANFAN_AK"] = getpass.getpass("Your Qianfan AK:")
os.environ["QIANFAN_SK"] = getpass.getpass("Your Qianfan SK:")
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../../state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
from langchain_community.embeddings import QianfanEmbeddingsEndpoint
embeddings = | QianfanEmbeddingsEndpoint() | langchain_community.embeddings.QianfanEmbeddingsEndpoint |
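# Quick sanity check of the embedding endpoint before indexing the chunks.
query_result = embeddings.embed_query("foo")
len(query_result)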
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
documents = TextLoader("../../state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever()
docs = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson"
)
pretty_print_docs(docs)
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
compressor = LLMChainExtractor.from_llm(llm)
compression_retriever = ContextualCompressionRetriever(
base_compressor=compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
"What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)
from langchain.retrievers.document_compressors import LLMChainFilter
_filter = LLMChainFilter.from_llm(llm)
compression_retriever = ContextualCompressionRetriever(
base_compressor=_filter, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
"What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
embeddings_filter = | EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76) | langchain.retrievers.document_compressors.EmbeddingsFilter |
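# Same compression pattern as above, but with the cheaper embeddings-based
# filter instead of an LLM call per document.
compression_retriever = ContextualCompressionRetriever(
    base_compressor=embeddings_filter, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
    "What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)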
get_ipython().run_line_magic('pip', 'install --upgrade --quiet unstructured')
from langchain_community.document_loaders import UnstructuredEmailLoader
loader = UnstructuredEmailLoader("example_data/fake-email.eml")
data = loader.load()
data
loader = | UnstructuredEmailLoader("example_data/fake-email.eml", mode="elements") | langchain_community.document_loaders.UnstructuredEmailLoader |
import os
os.environ["EXA_API_KEY"] = "..."
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-exa')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_exa import ExaSearchRetriever, TextContentsOptions
from langchain_openai import ChatOpenAI
retriever = ExaSearchRetriever(
k=5, text_contents_options=TextContentsOptions(max_length=200)
)
prompt = PromptTemplate.from_template(
"""Answer the following query based on the following context:
query: {query}
<context>
{context}
</context>"""
)
llm = ChatOpenAI()
chain = (
RunnableParallel({"context": retriever, "query": RunnablePassthrough()})
| prompt
| llm
)
chain.invoke("When is the best time to visit japan?")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-exa')
from exa_py import Exa
from langchain.agents import tool
exa = Exa(api_key=os.environ["EXA_API_KEY"])
@tool
def search(query: str):
"""Search for a webpage based on the query."""
return exa.search(f"{query}", use_autoprompt=True, num_results=5)
@tool
def find_similar(url: str):
"""Search for webpages similar to a given URL.
The url passed in should be a URL returned from `search`.
"""
return exa.find_similar(url, num_results=5)
@tool
def get_contents(ids: list[str]):
"""Get the contents of a webpage.
The ids passed in should be a list of ids returned from `search`.
"""
return exa.get_contents(ids)
tools = [search, get_contents, find_similar]
from langchain.agents import AgentExecutor, OpenAIFunctionsAgent
from langchain_core.messages import SystemMessage
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0)
system_message = | SystemMessage(
content="You are a web researcher who answers user questions by looking up information on the internet and retrieving contents of helpful documents. Cite your sources."
) | langchain_core.messages.SystemMessage |
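# Assemble the OpenAI-functions agent around the Exa tools and run one
# research query (docs-style pattern; the query text is illustrative).
agent_prompt = OpenAIFunctionsAgent.create_prompt(system_message)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=agent_prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.run("Summarize for me a fascinating article about cats.")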
import os
import comet_llm
os.environ["LANGCHAIN_COMET_TRACING"] = "true"
comet_llm.init()
os.environ["COMET_PROJECT_NAME"] = "comet-example-langchain-tracing"
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI
llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is 2 raised to .123243 power?") # this should be traced
if "LANGCHAIN_COMET_TRACING" in os.environ:
del os.environ["LANGCHAIN_COMET_TRACING"]
from langchain.callbacks.tracers.comet import CometTracer
tracer = | CometTracer() | langchain.callbacks.tracers.comet.CometTracer |
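# Alternative to the environment variable: pass the tracer explicitly as a
# callback on both the LLM and the agent run.
llm = OpenAI(temperature=0, callbacks=[tracer])
agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, callbacks=[tracer], verbose=True
)
agent.run("What is 2 raised to .123243 power?")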
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-cloud-storage')
from langchain_community.document_loaders import GCSFileLoader
loader = GCSFileLoader(project_name="aist", bucket="testing-hwc", blob="fake.docx")
loader.load()
from langchain_community.document_loaders import PyPDFLoader
def load_pdf(file_path):
return | PyPDFLoader(file_path) | langchain_community.document_loaders.PyPDFLoader |
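# Hand the custom parser to GCSFileLoader so PDF blobs go through PyPDF
# (loader_func per the docs pattern; the blob name is illustrative).
loader = GCSFileLoader(
    project_name="aist", bucket="testing-hwc", blob="fake.pdf", loader_func=load_pdf
)
loader.load()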
get_ipython().system(' pip install --quiet pypdf chromadb tiktoken openai langchain-together')
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader("~/Desktop/mixtral.pdf")
data = loader.load()
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
"""
from langchain_together.embeddings import TogetherEmbeddings
embeddings = TogetherEmbeddings(model="togethercomputer/m2-bert-80M-8k-retrieval")
"""
vectorstore = Chroma.from_documents(
documents=all_splits,
collection_name="rag-chroma",
embedding= | OpenAIEmbeddings() | langchain_community.embeddings.OpenAIEmbeddings |
import os
from langchain.indexes import VectorstoreIndexCreator
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_community.document_loaders.figma import FigmaFileLoader
from langchain_openai import ChatOpenAI
figma_loader = FigmaFileLoader(
os.environ.get("ACCESS_TOKEN"),
os.environ.get("NODE_IDS"),
os.environ.get("FILE_KEY"),
)
index = | VectorstoreIndexCreator() | langchain.indexes.VectorstoreIndexCreator |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pyvespa')
from vespa.package import ApplicationPackage, Field, RankProfile
app_package = ApplicationPackage(name="testapp")
app_package.schema.add_fields(
Field(
name="text", type="string", indexing=["index", "summary"], index="enable-bm25"
),
Field(
name="embedding",
type="tensor<float>(x[384])",
indexing=["attribute", "summary"],
attribute=["distance-metric: angular"],
),
)
app_package.schema.add_rank_profile(
RankProfile(
name="default",
first_phase="closeness(field, embedding)",
inputs=[("query(query_embedding)", "tensor<float>(x[384])")],
)
)
from vespa.deployment import VespaDocker
vespa_docker = VespaDocker()
vespa_app = vespa_docker.deploy(application_package=app_package)
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
vespa_config = dict(
page_content_field="text",
embedding_field="embedding",
input_field="query_embedding",
)
from langchain_community.vectorstores import VespaStore
db = VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config)
query = "What did the president say about Ketanji Brown Jackson"
results = db.similarity_search(query)
print(results[0].page_content)
query = "What did the president say about Ketanji Brown Jackson"
results = db.similarity_search(query)
result = results[0]
result.page_content = "UPDATED: " + result.page_content
db.add_texts([result.page_content], [result.metadata], result.metadata["id"])
results = db.similarity_search(query)
print(results[0].page_content)
result = db.similarity_search(query)
db.delete(["32"])
result = db.similarity_search(query)
results = db.similarity_search_with_score(query)
result = results[0]
db = | VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config) | langchain_community.vectorstores.VespaStore.from_documents |
from getpass import getpass
STOCHASTICAI_API_KEY = getpass()
import os
os.environ["STOCHASTICAI_API_KEY"] = STOCHASTICAI_API_KEY
YOUR_API_URL = getpass()
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import StochasticAI
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = | StochasticAI(api_url=YOUR_API_URL) | langchain_community.llms.StochasticAI |
import re
from typing import Union
from langchain.agents import (
AgentExecutor,
AgentOutputParser,
LLMSingleActionAgent,
)
from langchain.chains import LLMChain
from langchain.prompts import StringPromptTemplate
from langchain_community.agent_toolkits import NLAToolkit
from langchain_community.tools.plugin import AIPlugin
from langchain_core.agents import AgentAction, AgentFinish
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
urls = [
"https://datasette.io/.well-known/ai-plugin.json",
"https://api.speak.com/.well-known/ai-plugin.json",
"https://www.wolframalpha.com/.well-known/ai-plugin.json",
"https://www.zapier.com/.well-known/ai-plugin.json",
"https://www.klarna.com/.well-known/ai-plugin.json",
"https://www.joinmilo.com/.well-known/ai-plugin.json",
"https://slack.com/.well-known/ai-plugin.json",
"https://schooldigger.com/.well-known/ai-plugin.json",
]
AI_PLUGINS = [ | AIPlugin.from_url(url) | langchain_community.tools.plugin.AIPlugin.from_url |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet manifest-ml')
from langchain_community.llms.manifest import ManifestWrapper
from manifest import Manifest
manifest = Manifest(
client_name="huggingface", client_connection="http://127.0.0.1:5000"
)
print(manifest.client_pool.get_current_client().get_model_params())
llm = ManifestWrapper(
client=manifest, llm_kwargs={"temperature": 0.001, "max_tokens": 256}
)
from langchain.chains.mapreduce import MapReduceChain
from langchain.prompts import PromptTemplate
from langchain_text_splitters import CharacterTextSplitter
_prompt = """Write a concise summary of the following:
{text}
CONCISE SUMMARY:"""
prompt = PromptTemplate.from_template(_prompt)
text_splitter = CharacterTextSplitter()
mp_chain = | MapReduceChain.from_params(llm, prompt, text_splitter) | langchain.chains.mapreduce.MapReduceChain.from_params |
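# Run the map-reduce summarization over a long document.
with open("../../state_of_the_union.txt") as f:
    state_of_the_union = f.read()
mp_chain.run(state_of_the_union)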
get_ipython().run_line_magic('pip', 'install tika')
import os
from langchain_community.vectorstores import LLMRails
os.environ["LLM_RAILS_DATASTORE_ID"] = "Your datastore id "
os.environ["LLM_RAILS_API_KEY"] = "Your API Key"
llm_rails = | LLMRails.from_texts(["Your text here"]) | langchain_community.vectorstores.LLMRails.from_texts |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints')
import getpass
import os
if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"):
nvapi_key = getpass.getpass("Enter your NVIDIA API key: ")
assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key"
os.environ["NVIDIA_API_KEY"] = nvapi_key
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="mixtral_8x7b")
result = llm.invoke("Write a ballad about LangChain.")
print(result.content)
print(llm.batch(["What's 2*3?", "What's 2*6?"]))
for chunk in llm.stream("How far can a seagull fly in one day?"):
print(chunk.content, end="|")
async for chunk in llm.astream(
"How long does it take for monarch butterflies to migrate?"
):
print(chunk.content, end="|")
ChatNVIDIA.get_available_models()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
prompt = ChatPromptTemplate.from_messages(
[("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")]
)
chain = prompt | ChatNVIDIA(model="llama2_13b") | StrOutputParser()
for txt in chain.stream({"input": "What's your name?"}):
print(txt, end="")
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are an expert coding AI. Respond only in valid python; no narration whatsoever.",
),
("user", "{input}"),
]
)
chain = prompt | | ChatNVIDIA(model="llama2_code_70b") | langchain_nvidia_ai_endpoints.ChatNVIDIA |
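# Stream the code-only chain just like the general chat chain above.
for txt in chain.stream({"input": "How do I solve this fizz buzz problem?"}):
    print(txt, end="")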
from typing import Callable, List
import tenacity
from langchain.output_parsers import RegexParser
from langchain.prompts import PromptTemplate
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
self._step += 1
def step(self) -> tuple[str, str]:
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
message = speaker.send()
for receiver in self.agents:
receiver.receive(speaker.name, message)
self._step += 1
return speaker.name, message
class BiddingDialogueAgent(DialogueAgent):
def __init__(
self,
name,
system_message: SystemMessage,
bidding_template: PromptTemplate,
model: ChatOpenAI,
) -> None:
super().__init__(name, system_message, model)
self.bidding_template = bidding_template
def bid(self) -> str:
"""
Asks the chat model to output a bid to speak
"""
prompt = PromptTemplate(
input_variables=["message_history", "recent_message"],
template=self.bidding_template,
).format(
message_history="\n".join(self.message_history),
recent_message=self.message_history[-1],
)
bid_string = self.model([SystemMessage(content=prompt)]).content
return bid_string
character_names = ["Donald Trump", "Kanye West", "Elizabeth Warren"]
topic = "transcontinental high speed rail"
word_limit = 50
game_description = f"""Here is the topic for the presidential debate: {topic}.
The presidential candidates are: {', '.join(character_names)}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of each presidential candidate."
)
def generate_character_description(character_name):
character_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the presidential candidate, {character_name}, in {word_limit} words or less, that emphasizes their personalities.
Speak directly to {character_name}.
Do not add anything else."""
),
]
character_description = ChatOpenAI(temperature=1.0)(
character_specifier_prompt
).content
return character_description
def generate_character_header(character_name, character_description):
return f"""{game_description}
Your name is {character_name}.
You are a presidential candidate.
Your description is as follows: {character_description}
You are debating the topic: {topic}.
Your goal is to be as creative as possible and make the voters think you are the best candidate.
"""
def generate_character_system_message(character_name, character_header):
return SystemMessage(
content=(
f"""{character_header}
You will speak in the style of {character_name}, and exaggerate their personality.
You will come up with creative ideas related to {topic}.
Do not say the same things over and over again.
Speak in the first person from the perspective of {character_name}
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of anyone else.
Speak only from the perspective of {character_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
"""
)
)
character_descriptions = [
generate_character_description(character_name) for character_name in character_names
]
character_headers = [
generate_character_header(character_name, character_description)
for character_name, character_description in zip(
character_names, character_descriptions
)
]
character_system_messages = [
    generate_character_system_message(character_name, character_header)
    for character_name, character_header in zip(character_names, character_headers)
]
for (
character_name,
character_description,
character_header,
character_system_message,
) in zip(
character_names,
character_descriptions,
character_headers,
character_system_messages,
):
print(f"\n\n{character_name} Description:")
print(f"\n{character_description}")
print(f"\n{character_header}")
print(f"\n{character_system_message.content}")
class BidOutputParser(RegexParser):
def get_format_instructions(self) -> str:
return "Your response should be an integer delimited by angled brackets, like this: <int>."
bid_parser = BidOutputParser(
regex=r"<(\d+)>", output_keys=["bid"], default_output_key="bid"
)
def generate_character_bidding_template(character_header):
bidding_template = f"""{character_header}
```
{{message_history}}
```
On the scale of 1 to 10, where 1 is not contradictory and 10 is extremely contradictory, rate how contradictory the following message is to your ideas.
```
{{recent_message}}
```
{bid_parser.get_format_instructions()}
Do nothing else.
"""
return bidding_template
character_bidding_templates = [
generate_character_bidding_template(character_header)
for character_header in character_headers
]
for character_name, bidding_template in zip(
character_names, character_bidding_templates
):
print(f"{character_name} Bidding Template:")
print(bidding_template)
topic_specifier_prompt = [
| SystemMessage(content="You can make a task more specific.") | langchain.schema.SystemMessage |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-text-splitters tiktoken')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
chunk_size=100, chunk_overlap=0
)
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])
from langchain_text_splitters import TokenTextSplitter
text_splitter = TokenTextSplitter(chunk_size=10, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])
get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_text_splitters import SpacyTextSplitter
text_splitter = SpacyTextSplitter(chunk_size=1000)
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])
from langchain_text_splitters import SentenceTransformersTokenTextSplitter
splitter = | SentenceTransformersTokenTextSplitter(chunk_overlap=0) | langchain_text_splitters.SentenceTransformersTokenTextSplitter |
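# Token-aware splitting with the sentence-transformers tokenizer (minimal
# example; reuses the text loaded above).
print(splitter.count_tokens(text="Lorem ipsum"))
text_chunks = splitter.split_text(text=state_of_the_union)
print(text_chunks[0])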
get_ipython().run_line_magic('pip', 'install --upgrade --quiet redis redisvl langchain-openai tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redis_url = "redis://localhost:6379"
redis_url = "redis://:secret@redis:7379/2"
redis_url = "redis://joe:secret@redis/0"
redis_url = "redis+sentinel://localhost:26379"
redis_url = "redis+sentinel://joe:secret@redis"
redis_url = "redis+sentinel://redis:26379/zone-1/2"
redis_url = "rediss://localhost:6379"
redis_url = "rediss+sentinel://localhost"
metadata = [
{
"user": "john",
"age": 18,
"job": "engineer",
"credit_score": "high",
},
{
"user": "derrick",
"age": 45,
"job": "doctor",
"credit_score": "low",
},
{
"user": "nancy",
"age": 94,
"job": "doctor",
"credit_score": "high",
},
{
"user": "tyler",
"age": 100,
"job": "engineer",
"credit_score": "high",
},
{
"user": "joe",
"age": 35,
"job": "dentist",
"credit_score": "medium",
},
]
texts = ["foo", "foo", "foo", "bar", "bar"]
from langchain_community.vectorstores.redis import Redis
rds = Redis.from_texts(
texts,
embeddings,
metadatas=metadata,
redis_url="redis://localhost:6379",
index_name="users",
)
rds.index_name
get_ipython().system('rvl index listall')
get_ipython().system('rvl index info -i users')
get_ipython().system('rvl stats -i users')
results = rds.similarity_search("foo")
print(results[0].page_content)
results = rds.similarity_search("foo", k=3)
meta = results[1].metadata
print("Key of the document in Redis: ", meta.pop("id"))
print("Metadata of the document: ", meta)
results = rds.similarity_search_with_score("foo", k=5)
for result in results:
print(f"Content: {result[0].page_content} --- Score: {result[1]}")
results = rds.similarity_search_with_score("foo", k=5, distance_threshold=0.1)
for result in results:
print(f"Content: {result[0].page_content} --- Score: {result[1]}")
results = rds.similarity_search_with_relevance_scores("foo", k=5)
for result in results:
print(f"Content: {result[0].page_content} --- Similiarity: {result[1]}")
results = rds.similarity_search_with_relevance_scores("foo", k=5, score_threshold=0.9)
for result in results:
print(f"Content: {result[0].page_content} --- Similarity: {result[1]}")
new_document = ["baz"]
new_metadata = [{"user": "sam", "age": 50, "job": "janitor", "credit_score": "high"}]
rds.add_texts(new_document, new_metadata)
results = rds.similarity_search("baz", k=3)
print(results[0].metadata)
results = rds.max_marginal_relevance_search("foo")
results = rds.max_marginal_relevance_search("foo", lambda_mult=0.1)
rds.write_schema("redis_schema.yaml")
new_rds = Redis.from_existing_index(
embeddings,
index_name="users",
redis_url="redis://localhost:6379",
schema="redis_schema.yaml",
)
results = new_rds.similarity_search("foo", k=3)
print(results[0].metadata)
new_rds.schema == rds.schema
index_schema = {
"tag": [{"name": "credit_score"}],
"text": [{"name": "user"}, {"name": "job"}],
"numeric": [{"name": "age"}],
}
rds, keys = Redis.from_texts_return_keys(
texts,
embeddings,
metadatas=metadata,
redis_url="redis://localhost:6379",
index_name="users_modified",
index_schema=index_schema, # pass in the new index schema
)
from langchain_community.vectorstores.redis import RedisText
is_engineer = RedisText("job") == "engineer"
results = rds.similarity_search("foo", k=3, filter=is_engineer)
print("Job:", results[0].metadata["job"])
print("Engineers in the dataset:", len(results))
starts_with_doc = RedisText("job") % "doc*"
results = rds.similarity_search("foo", k=3, filter=starts_with_doc)
for result in results:
print("Job:", result.metadata["job"])
print("Jobs in dataset that start with 'doc':", len(results))
from langchain_community.vectorstores.redis import RedisNum
is_over_18 = RedisNum("age") > 18
is_under_99 = | RedisNum("age") | langchain_community.vectorstores.redis.RedisNum |
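# Filter expressions compose with & and |; assuming is_under_99 is completed
# as RedisNum("age") < 99, this restricts results to an age range.
age_filter = is_over_18 & is_under_99
results = rds.similarity_search("foo", k=10, filter=age_filter)
for result in results:
    print("User:", result.metadata["user"], "is", result.metadata["age"])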
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-pinecone langchain-openai langchain')
from langchain_community.document_loaders import TextLoader
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
from langchain_pinecone import PineconeVectorStore
index_name = "langchain-test-index"
docsearch = PineconeVectorStore.from_documents(docs, embeddings, index_name=index_name)
query = "What did the president say about Ketanji Brown Jackson"
docs = docsearch.similarity_search(query)
print(docs[0].page_content)
vectorstore = | PineconeVectorStore(index_name=index_name, embedding=embeddings) | langchain_pinecone.PineconeVectorStore |
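# The same index can keep growing: add more text, then search it.
vectorstore.add_texts(["More text!"])
vectorstore.similarity_search("More text!", k=1)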
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
template = """Answer the users question based only on the following context:
<context>
{context}
</context>
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI(temperature=0)
search = DuckDuckGoSearchAPIWrapper()
def retriever(query):
return search.run(query)
chain = (
{"context": retriever, "question": RunnablePassthrough()}
| prompt
| model
| StrOutputParser()
)
simple_query = "what is langchain?"
chain.invoke(simple_query)
distracted_query = "man that sam bankman fried trial was crazy! what is langchain?"
chain.invoke(distracted_query)
retriever(distracted_query)
template = """Provide a better search query for \
web search engine to answer the given question, end \
the queries with '**'. Question: \
{x} Answer:"""
rewrite_prompt = | ChatPromptTemplate.from_template(template) | langchain_core.prompts.ChatPromptTemplate.from_template |
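# Hedged sketch of rewrite-retrieve-read: rewrite the query, strip the '**'
# markers, retrieve on the rewritten query, then answer with the same prompt.
def _parse(text):
    return text.strip('"').strip("**")

rewriter = rewrite_prompt | ChatOpenAI(temperature=0) | StrOutputParser() | _parse
rewrite_retrieve_read_chain = (
    {
        "context": {"x": RunnablePassthrough()} | rewriter | retriever,
        "question": RunnablePassthrough(),
    }
    | prompt
    | model
    | StrOutputParser()
)
rewrite_retrieve_read_chain.invoke(distracted_query)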
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool, StructuredTool, tool
@tool
def search(query: str) -> str:
"""Look up things online."""
return "LangChain"
print(search.name)
print(search.description)
print(search.args)
@tool
def multiply(a: int, b: int) -> int:
"""Multiply two numbers."""
return a * b
print(multiply.name)
print(multiply.description)
print(multiply.args)
class SearchInput(BaseModel):
query: str = Field(description="should be a search query")
@tool("search-tool", args_schema=SearchInput, return_direct=True)
def search(query: str) -> str:
"""Look up things online."""
return "LangChain"
print(search.name)
print(search.description)
print(search.args)
print(search.return_direct)
from typing import Optional, Type
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
class SearchInput(BaseModel):
query: str = Field(description="should be a search query")
class CalculatorInput(BaseModel):
a: int = Field(description="first number")
b: int = Field(description="second number")
class CustomSearchTool(BaseTool):
name = "custom_search"
description = "useful for when you need to answer questions about current events"
args_schema: Type[BaseModel] = SearchInput
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
return "LangChain"
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
class CustomCalculatorTool(BaseTool):
name = "Calculator"
description = "useful for when you need to answer questions about math"
args_schema: Type[BaseModel] = CalculatorInput
return_direct: bool = True
def _run(
self, a: int, b: int, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
return a * b
async def _arun(
self,
a: int,
b: int,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("Calculator does not support async")
search = CustomSearchTool()
print(search.name)
print(search.description)
print(search.args)
multiply = CustomCalculatorTool()
print(multiply.name)
print(multiply.description)
print(multiply.args)
print(multiply.return_direct)
def search_function(query: str):
return "LangChain"
search = StructuredTool.from_function(
func=search_function,
name="Search",
description="useful for when you need to answer questions about current events",
)
print(search.name)
print(search.description)
print(search.args)
class CalculatorInput(BaseModel):
a: int = Field(description="first number")
b: int = Field(description="second number")
def multiply(a: int, b: int) -> int:
"""Multiply two numbers."""
return a * b
calculator = StructuredTool.from_function(
func=multiply,
name="Calculator",
description="multiply numbers",
args_schema=CalculatorInput,
return_direct=True,
)
print(calculator.name)
print(calculator.description)
print(calculator.args)
from langchain_core.tools import ToolException
def search_tool1(s: str):
raise | ToolException("The search tool1 is not available.") | langchain_core.tools.ToolException |
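# Sketch: expose the failing function as a tool and let handle_tool_error
# surface the ToolException message instead of raising.
search1 = StructuredTool.from_function(
    func=search_tool1,
    name="Search_tool1",
    description="A bad tool that always fails",
    handle_tool_error=True,
)
search1.run("test")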
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-community')
import os
os.environ["YDC_API_KEY"] = ""
os.environ["OPENAI_API_KEY"] = ""
from langchain_community.utilities.you import YouSearchAPIWrapper
utility = YouSearchAPIWrapper(num_web_results=1)
utility
import json
response = utility.raw_results(query="What is the weather in NY")
hits = response["hits"]
print(len(hits))
print(json.dumps(hits, indent=2))
response = utility.results(query="What is the weather in NY")
print(len(response))
print(response)
from langchain_community.retrievers.you import YouRetriever
retriever = YouRetriever(num_web_results=1)
retriever
response = retriever.invoke("What is the weather in NY")
print(len(response))
print(response)
get_ipython().system('pip install --upgrade --quiet langchain-openai')
from langchain_community.retrievers.you import YouRetriever
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
runnable = RunnablePassthrough
retriever = YouRetriever(num_web_results=1)
model = ChatOpenAI(model="gpt-3.5-turbo-16k")
output_parser = StrOutputParser()
prompt = ChatPromptTemplate.from_template(
"""Answer the question based only on the context provided.
Context: {context}
Question: {question}"""
)
chain = (
runnable.assign(context=(lambda x: x["question"]) | retriever)
| prompt
| model
| output_parser
)
output = chain.invoke({"question": "what is the weather in NY today"})
print(output)
prompt = | ChatPromptTemplate.from_template(
"""Answer the question based only on the context provided.
Context: {context}
Question: {question}"""
) | langchain_core.prompts.ChatPromptTemplate.from_template |
get_ipython().system('pip3 install oracle-ads')
import ads
from langchain_community.llms import OCIModelDeploymentVLLM
ads.set_auth("resource_principal")
llm = OCIModelDeploymentVLLM(endpoint="https://<MD_OCID>/predict", model="model_name")
llm.invoke("Who is the first president of United States?")
import os
from langchain_community.llms import OCIModelDeploymentTGI
os.environ["OCI_IAM_TYPE"] = "api_key"
os.environ["OCI_CONFIG_PROFILE"] = "default"
os.environ["OCI_CONFIG_LOCATION"] = "~/.oci"
os.environ["OCI_LLM_ENDPOINT"] = "https://<MD_OCID>/predict"
llm = | OCIModelDeploymentTGI() | langchain_community.llms.OCIModelDeploymentTGI |
from langchain_community.llms.symblai_nebula import Nebula
llm = | Nebula(nebula_api_key="<your_api_key>") | langchain_community.llms.symblai_nebula.Nebula |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet protobuf')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet nucliadb-protos')
import os
os.environ["NUCLIA_ZONE"] = "<YOUR_ZONE>" # e.g. europe-1
os.environ["NUCLIA_NUA_KEY"] = "<YOUR_API_KEY>"
from langchain_community.tools.nuclia import NucliaUnderstandingAPI
nua = | NucliaUnderstandingAPI(enable_ml=False) | langchain_community.tools.nuclia.NucliaUnderstandingAPI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pyvespa')
from vespa.package import ApplicationPackage, Field, RankProfile
app_package = ApplicationPackage(name="testapp")
app_package.schema.add_fields(
Field(
name="text", type="string", indexing=["index", "summary"], index="enable-bm25"
),
Field(
name="embedding",
type="tensor<float>(x[384])",
indexing=["attribute", "summary"],
attribute=["distance-metric: angular"],
),
)
app_package.schema.add_rank_profile(
RankProfile(
name="default",
first_phase="closeness(field, embedding)",
inputs=[("query(query_embedding)", "tensor<float>(x[384])")],
)
)
from vespa.deployment import VespaDocker
vespa_docker = VespaDocker()
vespa_app = vespa_docker.deploy(application_package=app_package)
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
vespa_config = dict(
page_content_field="text",
embedding_field="embedding",
input_field="query_embedding",
)
from langchain_community.vectorstores import VespaStore
db = VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config)
query = "What did the president say about Ketanji Brown Jackson"
results = db.similarity_search(query)
print(results[0].page_content)
query = "What did the president say about Ketanji Brown Jackson"
results = db.similarity_search(query)
result = results[0]
result.page_content = "UPDATED: " + result.page_content
db.add_texts([result.page_content], [result.metadata], result.metadata["id"])
results = db.similarity_search(query)
print(results[0].page_content)
result = db.similarity_search(query)
db.delete(["32"])
result = db.similarity_search(query)
results = db.similarity_search_with_score(query)
result = results[0]
db = VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config)
retriever = db.as_retriever()
query = "What did the president say about Ketanji Brown Jackson"
results = retriever.get_relevant_documents(query)
app_package.schema.add_fields(
Field(name="date", type="string", indexing=["attribute", "summary"]),
Field(name="rating", type="int", indexing=["attribute", "summary"]),
Field(name="author", type="string", indexing=["attribute", "summary"]),
)
vespa_app = vespa_docker.deploy(application_package=app_package)
for i, doc in enumerate(docs):
doc.metadata["date"] = f"2023-{(i % 12)+1}-{(i % 28)+1}"
doc.metadata["rating"] = range(1, 6)[i % 5]
doc.metadata["author"] = ["Joe Biden", "Unknown"][min(i, 1)]
vespa_config.update(dict(metadata_fields=["date", "rating", "author"]))
db = VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config)
query = "What did the president say about Ketanji Brown Jackson"
results = db.similarity_search(query, filter="rating > 3")
from vespa.package import FieldSet
app_package.schema.add_field_set(FieldSet(name="default", fields=["text"]))
app_package.schema.add_rank_profile(RankProfile(name="bm25", first_phase="bm25(text)"))
vespa_app = vespa_docker.deploy(application_package=app_package)
db = VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config)
query = "What did the president say about Ketanji Brown Jackson"
custom_query = {
"yql": "select * from sources * where userQuery()",
"query": query,
"type": "weakAnd",
"ranking": "bm25",
"hits": 4,
}
results = db.similarity_search_with_score(query, custom_query=custom_query)
app_package.schema.add_rank_profile(
RankProfile(
name="hybrid",
first_phase="log(bm25(text)) + 0.5 * closeness(field, embedding)",
inputs=[("query(query_embedding)", "tensor<float>(x[384])")],
)
)
vespa_app = vespa_docker.deploy(application_package=app_package)
db = | VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config) | langchain_community.vectorstores.VespaStore.from_documents |
meals = [
"Beef Enchiladas with Feta cheese. Mexican-Greek fusion",
"Chicken Flatbreads with red sauce. Italian-Mexican fusion",
"Veggie sweet potato quesadillas with vegan cheese",
"One-Pan Tortelonni bake with peppers and onions",
]
from langchain_openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-instruct")
from langchain.prompts import PromptTemplate
PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}".
Embed the meal into the given text: "{text_to_personalize}".
Prepend a personalized message including the user's name "{user}"
and their preference "{preference}".
Make it sound good.
"""
PROMPT = PromptTemplate(
input_variables=["meal", "text_to_personalize", "user", "preference"],
template=PROMPT_TEMPLATE,
)
import langchain_experimental.rl_chain as rl_chain
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
response = chain.run(
meal= | rl_chain.ToSelectFrom(meals) | langchain_experimental.rl_chain.ToSelectFrom |
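# Hedged sketch of a complete call (user and preference values are
# illustrative); the chain picks one meal and personalizes the message.
response = chain.run(
    meal=rl_chain.ToSelectFrom(meals),
    user=rl_chain.BasedOn("Tom"),
    preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
    text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
print(response["response"])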
from langchain_community.document_loaders.recursive_url_loader import RecursiveUrlLoader
from bs4 import BeautifulSoup as Soup
url = "https://docs.python.org/3.9/"
loader = RecursiveUrlLoader(
url=url, max_depth=2, extractor=lambda x: Soup(x, "html.parser").text
)
docs = loader.load()
docs[0].page_content[:50]
docs[-1].metadata
url = "https://js.langchain.com/docs/modules/memory/integrations/"
loader = | RecursiveUrlLoader(url=url) | langchain_community.document_loaders.recursive_url_loader.RecursiveUrlLoader |
get_ipython().system(' pip install lancedb')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import LanceDB
from langchain.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = | TextLoader("../../modules/state_of_the_union.txt") | langchain.document_loaders.TextLoader |
from typing import List
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
class Joke(BaseModel):
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
joke_query = "Tell me a joke."
parser = | JsonOutputParser(pydantic_object=Joke) | langchain_core.output_parsers.JsonOutputParser |
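# Hedged sketch following the JsonOutputParser docs: inject the parser's format
# instructions into the prompt, then pipe prompt | model | parser.
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
chain.invoke({"query": joke_query})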
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.model_laboratory import ModelLaboratory
from langchain.prompts import PromptTemplate
from langchain_community.llms import Cohere, HuggingFaceHub
from langchain_openai import OpenAI
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:")
os.environ["OPENAI_API_KEY"] = getpass.getpass("Open API Key:")
os.environ["HUGGINGFACEHUB_API_TOKEN"] = getpass.getpass("Hugging Face API Key:")
llms = [
OpenAI(temperature=0),
Cohere(temperature=0),
| HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 1}) | langchain_community.llms.HuggingFaceHub |
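# Hedged sketch: once the llms list above is closed, ModelLaboratory runs the same
# prompt against each model side by side.
model_lab = ModelLaboratory.from_llms(llms)
model_lab.compare("What color is a flamingo?")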
get_ipython().run_line_magic('pip', 'install --quiet pypdf chromadb tiktoken openai')
get_ipython().run_line_magic('pip', 'uninstall -y langchain-fireworks')
get_ipython().run_line_magic('pip', 'install --editable /mnt/disks/data/langchain/libs/partners/fireworks')
import fireworks
print(fireworks)
import fireworks.client
import requests
from langchain_community.document_loaders import PyPDFLoader
url = "https://storage.googleapis.com/deepmind-media/gemma/gemma-report.pdf"
response = requests.get(url, stream=True)
file_name = "temp_file.pdf"
with open(file_name, "wb") as pdf:
pdf.write(response.content)
loader = | PyPDFLoader(file_name) | langchain_community.document_loaders.PyPDFLoader |
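# Hedged sketch: load the downloaded PDF and split it for indexing; chunk sizes are
# illustrative, not from the source.
data = loader.load()
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200)
all_splits = text_splitter.split_documents(data)
len(all_splits)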
from langchain.document_loaders.csv_loader import CSVLoader
loader = | CSVLoader("data/corp_sens_data.csv") | langchain.document_loaders.csv_loader.CSVLoader |
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import ChatOpenAI
api_wrapper = | WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100) | langchain_community.utilities.WikipediaAPIWrapper |
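# Hedged sketch: wrap the API wrapper in a tool, pull a standard OpenAI-functions
# agent prompt from the hub, and run one illustrative query.
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
tools = [tool]
prompt = hub.pull("hwchase17/openai-functions-agent")
llm = ChatOpenAI(temperature=0)
agent = create_openai_functions_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "Who wrote Hamlet?"})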
from langchain_community.document_loaders import UnstructuredOrgModeLoader
loader = | UnstructuredOrgModeLoader(file_path="example_data/README.org", mode="elements") | langchain_community.document_loaders.UnstructuredOrgModeLoader |
from langchain_community.document_loaders import CoNLLULoader
loader = | CoNLLULoader("example_data/conllu.conllu") | langchain_community.document_loaders.CoNLLULoader |
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../state_of_the_union.txt", encoding="utf-8")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
for i, text in enumerate(texts):
text.metadata["source"] = f"{i}-pl"
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings)
from langchain.chains import create_qa_with_sources_chain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
qa_chain = create_qa_with_sources_chain(llm)
doc_prompt = PromptTemplate(
template="Content: {page_content}\nSource: {source}",
input_variables=["page_content", "source"],
)
final_qa_chain = StuffDocumentsChain(
llm_chain=qa_chain,
document_variable_name="context",
document_prompt=doc_prompt,
)
retrieval_qa = RetrievalQA(
retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain
)
query = "What did the president say about russia"
retrieval_qa.run(query)
qa_chain_pydantic = create_qa_with_sources_chain(llm, output_parser="pydantic")
final_qa_chain_pydantic = StuffDocumentsChain(
llm_chain=qa_chain_pydantic,
document_variable_name="context",
document_prompt=doc_prompt,
)
retrieval_qa_pydantic = RetrievalQA(
retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain_pydantic
)
retrieval_qa_pydantic.run(query)
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\
Make sure to avoid using any unclear pronouns.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
condense_question_chain = LLMChain(
llm=llm,
prompt=CONDENSE_QUESTION_PROMPT,
)
qa = ConversationalRetrievalChain(
question_generator=condense_question_chain,
retriever=docsearch.as_retriever(),
memory=memory,
combine_docs_chain=final_qa_chain,
)
query = "What did the president say about Ketanji Brown Jackson"
result = qa({"question": query})
result
query = "what did he say about her predecessor?"
result = qa({"question": query})
result
from typing import List
from langchain.chains.openai_functions import create_qa_with_structure_chain
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.messages import HumanMessage, SystemMessage
from pydantic import BaseModel, Field
class CustomResponseSchema(BaseModel):
"""An answer to the question being asked, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
countries_referenced: List[str] = Field(
..., description="All of the countries mentioned in the sources"
)
sources: List[str] = Field(
..., description="List of sources used to answer the question"
)
prompt_messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
HumanMessage(content="Answer question using the following context"),
| HumanMessagePromptTemplate.from_template("{context}") | langchain.prompts.chat.HumanMessagePromptTemplate.from_template |
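# Hedged sketch (assumes the prompt_messages list above is completed and closed):
# add a question slot, then bind CustomResponseSchema with
# create_qa_with_structure_chain so answers come back as structured objects.
prompt_messages.append(HumanMessagePromptTemplate.from_template("Question: {question}"))
chain_prompt = ChatPromptTemplate(messages=prompt_messages)
qa_chain_structured = create_qa_with_structure_chain(
    llm, CustomResponseSchema, output_parser="pydantic", prompt=chain_prompt
)
final_qa_chain_structured = StuffDocumentsChain(
    llm_chain=qa_chain_structured,
    document_variable_name="context",
    document_prompt=doc_prompt,
)
retrieval_qa_structured = RetrievalQA(
    retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain_structured
)
retrieval_qa_structured.run("What did he say about Russia?")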
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("embedding_distance")
evaluator.evaluate_strings(prediction="I shall go", reference="I shan't go")
evaluator.evaluate_strings(prediction="I shall go", reference="I will go")
from langchain.evaluation import EmbeddingDistance
list(EmbeddingDistance)
evaluator = load_evaluator(
"embedding_distance", distance_metric=EmbeddingDistance.EUCLIDEAN
)
from langchain_community.embeddings import HuggingFaceEmbeddings
embedding_model = | HuggingFaceEmbeddings() | langchain_community.embeddings.HuggingFaceEmbeddings |
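# Hedged sketch: the embedding-distance evaluator accepts any Embeddings
# implementation via the embeddings= parameter.
hf_evaluator = load_evaluator("embedding_distance", embeddings=embedding_model)
hf_evaluator.evaluate_strings(prediction="I shall go", reference="I shan't go")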
get_ipython().run_line_magic('pip', 'install --upgrade --quiet dingodb')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet git+https://[email protected]/dingodb/pydingo.git')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Dingo
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = | CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | langchain_text_splitters.CharacterTextSplitter |
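# Hedged sketch (assumptions: a DingoDB server at 127.0.0.1:13000 and an existing
# index named "langchain_demo"; the client and from_documents parameters follow the
# Dingo integration docs and may differ by version).
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
from dingodb import DingoDB
dingo_client = DingoDB(user="", password="", host=["127.0.0.1:13000"])
vectorstore = Dingo.from_documents(
    docs, embeddings, client=dingo_client, index_name="langchain_demo"
)
vectorstore.similarity_search("What did the president say about Ketanji Brown Jackson")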
get_ipython().run_line_magic('pip', 'install --upgrade --quiet opencv-python scikit-image')
import os
from langchain_openai import OpenAI
os.environ["OPENAI_API_KEY"] = "<your-key-here>"
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
from langchain_openai import OpenAI
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
input_variables=["image_desc"],
template="Generate a detailed prompt to generate an image based on the following description: {image_desc}",
)
chain = LLMChain(llm=llm, prompt=prompt)
image_url = DallEAPIWrapper().run(chain.run("halloween night at a haunted museum"))
image_url
try:
import google.colab
IN_COLAB = True
except ImportError:
IN_COLAB = False
if IN_COLAB:
from google.colab.patches import cv2_imshow # for image display
from skimage import io
image = io.imread(image_url)
cv2_imshow(image)
else:
import cv2
from skimage import io
image = io.imread(image_url)
cv2.imshow("image", image)
cv2.waitKey(0) # wait for a keyboard input
cv2.destroyAllWindows()
from langchain.agents import initialize_agent, load_tools
tools = load_tools(["dalle-image-generator"])
agent = | initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True) | langchain.agents.initialize_agent |
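# Hedged sketch: let the agent decide to call the DALL-E tool; the prompt text is
# illustrative.
output = agent.run("Create an image of a halloween night at a haunted museum")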
from langchain.prompts import (
ChatPromptTemplate,
FewShotChatMessagePromptTemplate,
)
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
]
example_prompt = ChatPromptTemplate.from_messages(
[
("human", "{input}"),
("ai", "{output}"),
]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
example_prompt=example_prompt,
examples=examples,
)
print(few_shot_prompt.format())
final_prompt = | ChatPromptTemplate.from_messages(
[
("system", "You are a wondrous wizard of math.") | langchain.prompts.ChatPromptTemplate.from_messages |
get_ipython().run_cell_magic('writefile', 'wechat_chats.txt', '女朋友 2023/09/16 2:51 PM\n天气有点凉\n\n男朋友 2023/09/16 2:51 PM\n珍簟凉风著,瑶琴寄恨生。嵇君懒书札,底物慰秋情。\n\n女朋友 2023/09/16 3:06 PM\n忙什么呢\n\n男朋友 2023/09/16 3:06 PM\n今天只干成了一件像样的事\n那就是想你\n\n女朋友 2023/09/16 3:06 PM\n[动画表情]\n')
import logging
import re
from typing import Iterator, List
from langchain_community.chat_loaders import base as chat_loaders
from langchain_core.messages import BaseMessage, HumanMessage
logger = logging.getLogger()
class WeChatChatLoader(chat_loaders.BaseChatLoader):
def __init__(self, path: str):
"""
        Initialize the WeChat chat loader.
Args:
            path: Path to the exported WeChat chat text file.
"""
self.path = path
self._message_line_regex = re.compile(
r"(?P<sender>.+?) (?P<timestamp>\d{4}/\d{2}/\d{2} \d{1,2}:\d{2} (?:AM|PM))", # noqa
)
def _append_message_to_results(
self,
results: List,
current_sender: str,
current_timestamp: str,
current_content: List[str],
):
content = "\n".join(current_content).strip()
if not re.match(r"\[.*\]", content):
results.append(
HumanMessage(
content=content,
additional_kwargs={
"sender": current_sender,
"events": [{"message_time": current_timestamp}],
},
)
)
return results
def _load_single_chat_session_from_txt(
self, file_path: str
) -> chat_loaders.ChatSession:
"""
Load a single chat session from a text file.
Args:
file_path: Path to the text file containing the chat messages.
Returns:
A `ChatSession` object containing the loaded chat messages.
"""
with open(file_path, "r", encoding="utf-8") as file:
lines = file.readlines()
results: List[BaseMessage] = []
current_sender = None
current_timestamp = None
current_content = []
for line in lines:
if re.match(self._message_line_regex, line):
if current_sender and current_content:
results = self._append_message_to_results(
results, current_sender, current_timestamp, current_content
)
current_sender, current_timestamp = re.match(
self._message_line_regex, line
).groups()
current_content = []
else:
current_content.append(line.strip())
if current_sender and current_content:
results = self._append_message_to_results(
results, current_sender, current_timestamp, current_content
)
return | chat_loaders.ChatSession(messages=results) | langchain_community.chat_loaders.base.ChatSession |
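# Hedged sketch (assumption): BaseChatLoader also needs a lazy_load method, e.g. one
# that yields self._load_single_chat_session_from_txt(self.path). With that in place
# the loader composes with the standard chat-loader utilities.
from langchain_community.chat_loaders.utils import map_ai_messages, merge_chat_runs
loader = WeChatChatLoader(path="./wechat_chats.txt")
raw_messages = loader.lazy_load()
merged_messages = merge_chat_runs(raw_messages)
messages = list(map_ai_messages(merged_messages, sender="男朋友"))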
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
import os
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import GooseAI
from getpass import getpass
GOOSEAI_API_KEY = getpass()
os.environ["GOOSEAI_API_KEY"] = GOOSEAI_API_KEY
llm = | GooseAI() | langchain_community.llms.GooseAI |
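# Hedged sketch: the standard LLMChain smoke test from the GooseAI integration page.
template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm_chain = LLMChain(prompt=prompt, llm=llm)
llm_chain.run("What NFL team won the Super Bowl in the year Justin Bieber was born?")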
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("criteria", criteria="conciseness")
from langchain.evaluation import EvaluatorType
evaluator = load_evaluator(EvaluatorType.CRITERIA, criteria="conciseness")
eval_result = evaluator.evaluate_strings(
prediction="What's 2+2? That's an elementary question. The answer you're looking for is that two and two is four.",
input="What's 2+2?",
)
print(eval_result)
evaluator = load_evaluator("labeled_criteria", criteria="correctness")
eval_result = evaluator.evaluate_strings(
input="What is the capital of the US?",
prediction="Topeka, KS",
reference="The capital of the US is Topeka, KS, where it permanently moved from Washington D.C. on May 16, 2023",
)
print(f'With ground truth: {eval_result["score"]}')
from langchain.evaluation import Criteria
list(Criteria)
custom_criterion = {
"numeric": "Does the output contain numeric or mathematical information?"
}
eval_chain = load_evaluator(
EvaluatorType.CRITERIA,
criteria=custom_criterion,
)
query = "Tell me a joke"
prediction = "I ate some square pie but I don't know the square of pi."
eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)
print(eval_result)
custom_criteria = {
"numeric": "Does the output contain numeric information?",
"mathematical": "Does the output contain mathematical information?",
"grammatical": "Is the output grammatically correct?",
"logical": "Is the output logical?",
}
eval_chain = load_evaluator(
EvaluatorType.CRITERIA,
criteria=custom_criteria,
)
eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)
print("Multi-criteria evaluation")
print(eval_result)
from langchain.chains.constitutional_ai.principles import PRINCIPLES
print(f"{len(PRINCIPLES)} available principles")
list(PRINCIPLES.items())[:5]
evaluator = load_evaluator(EvaluatorType.CRITERIA, criteria=PRINCIPLES["harmful1"])
eval_result = evaluator.evaluate_strings(
prediction="I say that man is a lilly-livered nincompoop",
input="What do you think of Will?",
)
print(eval_result)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet anthropic')
from langchain_community.chat_models import ChatAnthropic
llm = ChatAnthropic(temperature=0)
evaluator = load_evaluator("criteria", llm=llm, criteria="conciseness")
eval_result = evaluator.evaluate_strings(
prediction="What's 2+2? That's an elementary question. The answer you're looking for is that two and two is four.",
input="What's 2+2?",
)
print(eval_result)
from langchain.prompts import PromptTemplate
fstring = """Respond Y or N based on how well the following response follows the specified rubric. Grade only based on the rubric and expected response:
Grading Rubric: {criteria}
Expected Response: {reference}
DATA:
---------
Question: {input}
Response: {output}
---------
Write out your explanation for each criterion, then respond with Y or N on a new line."""
prompt = PromptTemplate.from_template(fstring)
evaluator = | load_evaluator("labeled_criteria", criteria="correctness", prompt=prompt) | langchain.evaluation.load_evaluator |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet python-gitlab')
import os
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit
from langchain_community.utilities.gitlab import GitLabAPIWrapper
from langchain_openai import OpenAI
os.environ["GITLAB_URL"] = "https://gitlab.example.org"
os.environ["GITLAB_PERSONAL_ACCESS_TOKEN"] = ""
os.environ["GITLAB_REPOSITORY"] = "username/repo-name"
os.environ["GITLAB_BRANCH"] = "bot-branch-name"
os.environ["GITLAB_BASE_BRANCH"] = "main"
os.environ["OPENAI_API_KEY"] = ""
llm = OpenAI(temperature=0)
gitlab = GitLabAPIWrapper()
toolkit = | GitLabToolkit.from_gitlab_api_wrapper(gitlab) | langchain_community.agent_toolkits.gitlab.toolkit.GitLabToolkit.from_gitlab_api_wrapper |
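# Hedged sketch: hand the toolkit's tools to a zero-shot agent; the instruction
# string is illustrative.
agent = initialize_agent(
    toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("Look at the open issues in the repository and summarize them.")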
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0).configurable_fields(
temperature=ConfigurableField(
id="llm_temperature",
name="LLM Temperature",
description="The temperature of the LLM",
)
)
model.invoke("pick a random number")
model.with_config(configurable={"llm_temperature": 0.9}).invoke("pick a random number")
prompt = PromptTemplate.from_template("Pick a random number above {x}")
chain = prompt | model
chain.invoke({"x": 0})
chain.with_config(configurable={"llm_temperature": 0.9}).invoke({"x": 0})
from langchain.runnables.hub import HubRunnable
prompt = | HubRunnable("rlm/rag-prompt") | langchain.runnables.hub.HubRunnable |
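# Hedged sketch: make the hub prompt configurable by commit, mirroring the
# configurable_fields pattern above.
prompt = HubRunnable("rlm/rag-prompt").configurable_fields(
    owner_repo_commit=ConfigurableField(
        id="hub_commit",
        name="Hub Commit",
        description="The Hub commit to pull from",
    )
)
prompt.invoke({"question": "foo", "context": "bar"})
prompt.with_config(configurable={"hub_commit": "rlm/rag-prompt-llama"}).invoke(
    {"question": "foo", "context": "bar"}
)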
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic')
import os
import boto3
comprehend_client = boto3.client("comprehend", region_name="us-east-1")
from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
comprehend_moderation = AmazonComprehendModerationChain(
client=comprehend_client,
verbose=True, # optional
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comprehend_moderation
| {"input": (lambda x: x["output"]) | llm}
| comprehend_moderation
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"
}
)
except ModerationPiiError as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import (
BaseModerationConfig,
ModerationPiiConfig,
ModerationPromptSafetyConfig,
ModerationToxicityConfig,
)
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5)
moderation_config = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)
comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
verbose=True,
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comp_moderation_with_config
| {"input": (lambda x: x["output"]) | llm}
| comp_moderation_with_config
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?"
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler
class MyModCallback(BaseModerationCallbackHandler):
async def on_after_pii(self, output_beacon, unique_id):
import json
moderation_type = output_beacon["moderation_type"]
chain_id = output_beacon["moderation_chain_id"]
with open(f"output-{moderation_type}-{chain_id}.json", "w") as file:
data = {"beacon_data": output_beacon, "unique_id": unique_id}
json.dump(data, file)
"""
async def on_after_toxicity(self, output_beacon, unique_id):
pass
async def on_after_prompt_safety(self, output_beacon, unique_id):
pass
"""
my_callback = MyModCallback()
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config])
comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
    unique_id="user@example.com", # A unique ID (placeholder; the original value was masked in the source)
moderation_callback=my_callback, # BaseModerationCallbackHandler
verbose=True,
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = | FakeListLLM(responses=responses) | langchain_community.llms.fake.FakeListLLM |
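# Hedged sketch: wire the callback-enabled moderation chain exactly like the earlier
# ones in this row and run it; the SSN in the question is fake sample data.
chain = (
    prompt
    | comp_moderation_with_config
    | {"input": (lambda x: x["output"]) | llm}
    | comp_moderation_with_config
)
response = chain.invoke(
    {
        "question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"
    }
)
print(response["output"])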