prompt: string (51 to 10k characters)
completion: string (8 to 362 characters)
api: string (18 to 90 characters)
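Each row below is a (prompt, completion, api) triple: prompt holds a LangChain code snippet truncated mid-expression, completion holds the code that finishes it, and api holds the fully qualified path of the API being invoked. Joining a row's prompt and completion reconstructs runnable code; the first row, for instance, ends with llm = and completes to llm = Cohere(temperature=0), with api langchain_community.llms.Cohere.

A minimal sketch of reassembling rows, assuming this preview comes from a Hugging Face dataset (the dataset id below is a hypothetical placeholder):

from datasets import load_dataset

# Hypothetical dataset id; substitute the real path to this dataset.
ds = load_dataset("example-org/langchain-api-completions", split="train")

for row in ds:
    # The prompt ends mid-expression, so prompt + completion yields the full snippet.
    snippet = row["prompt"] + row["completion"]
    print(row["api"], "=>", snippet[-80:])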
get_ipython().run_line_magic('pip', 'install --upgrade --quiet cohere') get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss') get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss-cpu') import getpass import os os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:") def pretty_print_docs(docs): print( f"\n{'-' * 100}\n".join( [f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)] ) ) from langchain_community.document_loaders import TextLoader from langchain_community.embeddings import CohereEmbeddings from langchain_community.vectorstores import FAISS from langchain_text_splitters import RecursiveCharacterTextSplitter documents = TextLoader("../../modules/state_of_the_union.txt").load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100) texts = text_splitter.split_documents(documents) retriever = FAISS.from_documents(texts, CohereEmbeddings()).as_retriever( search_kwargs={"k": 20} ) query = "What did the president say about Ketanji Brown Jackson" docs = retriever.get_relevant_documents(query) pretty_print_docs(docs) from langchain.retrievers import ContextualCompressionRetriever from langchain.retrievers.document_compressors import CohereRerank from langchain_community.llms import Cohere llm =
Cohere(temperature=0)
langchain_community.llms.Cohere
from langchain.prompts import ( ChatPromptTemplate, FewShotChatMessagePromptTemplate, ) examples = [ {"input": "2+2", "output": "4"}, {"input": "2+3", "output": "5"}, ] example_prompt = ChatPromptTemplate.from_messages( [ ("human", "{input}"), ("ai", "{output}"), ] ) few_shot_prompt = FewShotChatMessagePromptTemplate( example_prompt=example_prompt, examples=examples, ) print(few_shot_prompt.format()) final_prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a wondrous wizard of math."), few_shot_prompt, ("human", "{input}"), ] ) from langchain_community.chat_models import ChatAnthropic chain = final_prompt |
ChatAnthropic(temperature=0.0)
langchain_community.chat_models.ChatAnthropic
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain label-studio label-studio-sdk langchain-openai') import os os.environ["LABEL_STUDIO_URL"] = "<YOUR-LABEL-STUDIO-URL>" # e.g. http://localhost:8080 os.environ["LABEL_STUDIO_API_KEY"] = "<YOUR-LABEL-STUDIO-API-KEY>" os.environ["OPENAI_API_KEY"] = "<YOUR-OPENAI-API-KEY>" from langchain.callbacks import LabelStudioCallbackHandler from langchain_openai import OpenAI llm = OpenAI( temperature=0, callbacks=[
LabelStudioCallbackHandler(project_name="My Project")
langchain.callbacks.LabelStudioCallbackHandler
from langchain_community.llms.fake import FakeListLLM from langchain.agents import AgentType, initialize_agent, load_tools tools = load_tools(["python_repl"]) responses = ["Action: Python REPL\nAction Input: print(2 + 2)", "Final Answer: 4"] llm =
FakeListLLM(responses=responses)
langchain_community.llms.fake.FakeListLLM
import os import chromadb from langchain.retrievers import ContextualCompressionRetriever from langchain.retrievers.document_compressors import DocumentCompressorPipeline from langchain.retrievers.merger_retriever import MergerRetriever from langchain_community.document_transformers import ( EmbeddingsClusteringFilter, EmbeddingsRedundantFilter, ) from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.vectorstores import Chroma from langchain_openai import OpenAIEmbeddings all_mini = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2") multi_qa_mini = HuggingFaceEmbeddings(model_name="multi-qa-MiniLM-L6-dot-v1") filter_embeddings = OpenAIEmbeddings() ABS_PATH = os.path.dirname(os.path.abspath(__file__)) DB_DIR = os.path.join(ABS_PATH, "db") client_settings = chromadb.config.Settings( is_persistent=True, persist_directory=DB_DIR, anonymized_telemetry=False, ) db_all = Chroma( collection_name="project_store_all", persist_directory=DB_DIR, client_settings=client_settings, embedding_function=all_mini, ) db_multi_qa = Chroma( collection_name="project_store_multi", persist_directory=DB_DIR, client_settings=client_settings, embedding_function=multi_qa_mini, ) retriever_all = db_all.as_retriever( search_type="similarity", search_kwargs={"k": 5, "include_metadata": True} ) retriever_multi_qa = db_multi_qa.as_retriever( search_type="mmr", search_kwargs={"k": 5, "include_metadata": True} ) lotr = MergerRetriever(retrievers=[retriever_all, retriever_multi_qa]) filter = EmbeddingsRedundantFilter(embeddings=filter_embeddings) pipeline = DocumentCompressorPipeline(transformers=[filter]) compression_retriever = ContextualCompressionRetriever( base_compressor=pipeline, base_retriever=lotr ) filter_ordered_cluster = EmbeddingsClusteringFilter( embeddings=filter_embeddings, num_clusters=10, num_closest=1, ) filter_ordered_by_retriever = EmbeddingsClusteringFilter( embeddings=filter_embeddings, num_clusters=10, num_closest=1, sorted=True, ) pipeline =
DocumentCompressorPipeline(transformers=[filter_ordered_by_retriever])
langchain.retrievers.document_compressors.DocumentCompressorPipeline
from typing import List from langchain.output_parsers import PydanticOutputParser from langchain.prompts import PromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field, validator from langchain_openai import ChatOpenAI model = ChatOpenAI(temperature=0) class Joke(BaseModel): setup: str = Field(description="question to set up a joke") punchline: str = Field(description="answer to resolve the joke") @validator("setup") def question_ends_with_question_mark(cls, field): if field[-1] != "?": raise ValueError("Badly formed question!") return field joke_query = "Tell me a joke." parser = PydanticOutputParser(pydantic_object=Joke) prompt = PromptTemplate( template="Answer the user query.\n{format_instructions}\n{query}\n", input_variables=["query"], partial_variables={"format_instructions": parser.get_format_instructions()}, ) chain = prompt | model | parser chain.invoke({"query": joke_query}) class Actor(BaseModel): name: str = Field(description="name of an actor") film_names: List[str] = Field(description="list of names of films they starred in") actor_query = "Generate the filmography for a random actor." parser =
PydanticOutputParser(pydantic_object=Actor)
langchain.output_parsers.PydanticOutputParser
from langchain.agents import AgentExecutor, BaseMultiActionAgent, Tool from langchain_community.utilities import SerpAPIWrapper def random_word(query: str) -> str: print("\nNow I'm doing this!") return "foo" search =
SerpAPIWrapper()
langchain_community.utilities.SerpAPIWrapper
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wikipedia') from langchain import hub from langchain.agents import AgentExecutor, create_react_agent from langchain_community.tools import WikipediaQueryRun from langchain_community.utilities import WikipediaAPIWrapper from langchain_openai import OpenAI api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100) tool = WikipediaQueryRun(api_wrapper=api_wrapper) tools = [tool] prompt = hub.pull("hwchase17/react") llm = OpenAI(temperature=0) agent =
create_react_agent(llm, tools, prompt)
langchain.agents.create_react_agent
from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint from langchain_community.llms.azureml_endpoint import ( AzureMLEndpointApiType, LlamaContentFormatter, ) from langchain_core.messages import HumanMessage llm = AzureMLOnlineEndpoint( endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score", endpoint_api_type=AzureMLEndpointApiType.realtime, endpoint_api_key="my-api-key", content_formatter=LlamaContentFormatter(), model_kwargs={"temperature": 0.8, "max_new_tokens": 400}, ) response = llm.invoke("Write me a song about sparkling water:") response response = llm.invoke("Write me a song about sparkling water:", temperature=0.5) response from langchain_community.llms.azureml_endpoint import ( AzureMLEndpointApiType, LlamaContentFormatter, ) from langchain_core.messages import HumanMessage llm = AzureMLOnlineEndpoint( endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/v1/completions", endpoint_api_type=AzureMLEndpointApiType.serverless, endpoint_api_key="my-api-key", content_formatter=
LlamaContentFormatter()
langchain_community.llms.azureml_endpoint.LlamaContentFormatter
import os import yaml get_ipython().system('wget https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml -O openai_openapi.yaml') get_ipython().system('wget https://www.klarna.com/us/shopping/public/openai/v0/api-docs -O klarna_openapi.yaml') get_ipython().system('wget https://raw.githubusercontent.com/APIs-guru/openapi-directory/main/APIs/spotify.com/1.0.0/openapi.yaml -O spotify_openapi.yaml') from langchain_community.agent_toolkits.openapi.spec import reduce_openapi_spec with open("openai_openapi.yaml") as f: raw_openai_api_spec = yaml.load(f, Loader=yaml.Loader) openai_api_spec = reduce_openapi_spec(raw_openai_api_spec) with open("klarna_openapi.yaml") as f: raw_klarna_api_spec = yaml.load(f, Loader=yaml.Loader) klarna_api_spec = reduce_openapi_spec(raw_klarna_api_spec) with open("spotify_openapi.yaml") as f: raw_spotify_api_spec = yaml.load(f, Loader=yaml.Loader) spotify_api_spec =
reduce_openapi_spec(raw_spotify_api_spec)
langchain_community.agent_toolkits.openapi.spec.reduce_openapi_spec
import asyncio from langchain.callbacks import get_openai_callback from langchain_openai import OpenAI llm = OpenAI(temperature=0) with get_openai_callback() as cb: llm("What is the square root of 4?") total_tokens = cb.total_tokens assert total_tokens > 0 with
get_openai_callback()
langchain.callbacks.get_openai_callback
get_ipython().run_cell_magic('writefile', 'discord_chats.txt', "talkingtower — 08/15/2023 11:10 AM\nLove music! Do you like jazz?\nreporterbob — 08/15/2023 9:27 PM\nYes! Jazz is fantastic. Ever heard this one?\nWebsite\nListen to classic jazz track...\n\ntalkingtower — Yesterday at 5:03 AM\nIndeed! Great choice. 🎷\nreporterbob — Yesterday at 5:23 AM\nThanks! How about some virtual sightseeing?\nWebsite\nVirtual tour of famous landmarks...\n\ntalkingtower — Today at 2:38 PM\nSounds fun! Let's explore.\nreporterbob — Today at 2:56 PM\nEnjoy the tour! See you around.\ntalkingtower — Today at 3:00 PM\nThank you! Goodbye! 👋\nreporterbob — Today at 3:02 PM\nFarewell! Happy exploring.\n") import logging import re from typing import Iterator, List from langchain_community.chat_loaders import base as chat_loaders from langchain_core.messages import BaseMessage, HumanMessage logger = logging.getLogger() class DiscordChatLoader(chat_loaders.BaseChatLoader): def __init__(self, path: str): """ Initialize the Discord chat loader. Args: path: Path to the exported Discord chat text file. """ self.path = path self._message_line_regex = re.compile( r"(.+?) — (\w{3,9} \d{1,2}(?:st|nd|rd|th)?(?:, \d{4})? \d{1,2}:\d{2} (?:AM|PM)|Today at \d{1,2}:\d{2} (?:AM|PM)|Yesterday at \d{1,2}:\d{2} (?:AM|PM))", # noqa flags=re.DOTALL, ) def _load_single_chat_session_from_txt( self, file_path: str ) -> chat_loaders.ChatSession: """ Load a single chat session from a text file. Args: file_path: Path to the text file containing the chat messages. Returns: A `ChatSession` object containing the loaded chat messages. """ with open(file_path, "r", encoding="utf-8") as file: lines = file.readlines() results: List[BaseMessage] = [] current_sender = None current_timestamp = None current_content = [] for line in lines: if re.match( r".+? — (\d{2}/\d{2}/\d{4} \d{1,2}:\d{2} (?:AM|PM)|Today at \d{1,2}:\d{2} (?:AM|PM)|Yesterday at \d{1,2}:\d{2} (?:AM|PM))", # noqa line, ): if current_sender and current_content: results.append( HumanMessage( content="".join(current_content).strip(), additional_kwargs={ "sender": current_sender, "events": [{"message_time": current_timestamp}], }, ) ) current_sender, current_timestamp = line.split(" — ")[:2] current_content = [ line[len(current_sender) + len(current_timestamp) + 4 :].strip() ] elif re.match(r"\[\d{1,2}:\d{2} (?:AM|PM)\]", line.strip()): results.append( HumanMessage( content="".join(current_content).strip(), additional_kwargs={ "sender": current_sender, "events": [{"message_time": current_timestamp}], }, ) ) current_timestamp = line.strip()[1:-1] current_content = [] else: current_content.append("\n" + line.strip()) if current_sender and current_content: results.append( HumanMessage( content="".join(current_content).strip(), additional_kwargs={ "sender": current_sender, "events": [{"message_time": current_timestamp}], }, ) ) return chat_loaders.ChatSession(messages=results) def lazy_load(self) -> Iterator[chat_loaders.ChatSession]: """ Lazy load the messages from the chat file and yield them in the required format. Yields: A `ChatSession` object containing the loaded chat messages. 
""" yield self._load_single_chat_session_from_txt(self.path) loader = DiscordChatLoader( path="./discord_chats.txt", ) from typing import List from langchain_community.chat_loaders.base import ChatSession from langchain_community.chat_loaders.utils import ( map_ai_messages, merge_chat_runs, ) raw_messages = loader.lazy_load() merged_messages = merge_chat_runs(raw_messages) messages: List[ChatSession] = list(
map_ai_messages(merged_messages, sender="talkingtower")
langchain_community.chat_loaders.utils.map_ai_messages
from langchain_community.document_loaders import UnstructuredExcelLoader loader =
UnstructuredExcelLoader("example_data/stanley-cups.xlsx", mode="elements")
langchain_community.document_loaders.UnstructuredExcelLoader
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)') get_ipython().system(' pip install "unstructured[all-docs]==0.10.19" pillow pydantic lxml matplotlib tiktoken open_clip_torch torch') path = "/Users/rlm/Desktop/cpi/" from langchain_community.document_loaders import PyPDFLoader loader = PyPDFLoader(path + "cpi.pdf") pdf_pages = loader.load() from langchain_text_splitters import RecursiveCharacterTextSplitter text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) all_splits_pypdf = text_splitter.split_documents(pdf_pages) all_splits_pypdf_texts = [d.page_content for d in all_splits_pypdf] from unstructured.partition.pdf import partition_pdf raw_pdf_elements = partition_pdf( filename=path + "cpi.pdf", extract_images_in_pdf=True, infer_table_structure=True, chunking_strategy="by_title", max_characters=4000, new_after_n_chars=3800, combine_text_under_n_chars=2000, image_output_dir_path=path, ) tables = [] texts = [] for element in raw_pdf_elements: if "unstructured.documents.elements.Table" in str(type(element)): tables.append(str(element)) elif "unstructured.documents.elements.CompositeElement" in str(type(element)): texts.append(str(element)) from langchain_community.vectorstores import Chroma from langchain_openai import OpenAIEmbeddings baseline = Chroma.from_texts( texts=all_splits_pypdf_texts, collection_name="baseline", embedding=OpenAIEmbeddings(), ) retriever_baseline = baseline.as_retriever() from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \ These summaries will be embedded and used to retrieve the raw text or table elements. \ Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """ prompt = ChatPromptTemplate.from_template(prompt_text) model = ChatOpenAI(temperature=0, model="gpt-4") summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser() text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5}) table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5}) import base64 import io import os from io import BytesIO from langchain_core.messages import HumanMessage from PIL import Image def encode_image(image_path): """Getting the base64 string""" with open(image_path, "rb") as image_file: return base64.b64encode(image_file.read()).decode("utf-8") def image_summarize(img_base64, prompt): """Image summary""" chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024) msg = chat.invoke( [ HumanMessage( content=[ {"type": "text", "text": prompt}, { "type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"}, }, ] ) ] ) return msg.content img_base64_list = [] image_summaries = [] prompt = """You are an assistant tasked with summarizing images for retrieval. \ These summaries will be embedded and used to retrieve the raw image. 
\ Give a concise summary of the image that is well optimized for retrieval.""" for img_file in sorted(os.listdir(path)): if img_file.endswith(".jpg"): img_path = os.path.join(path, img_file) base64_image = encode_image(img_path) img_base64_list.append(base64_image) image_summaries.append(image_summarize(base64_image, prompt)) import uuid from base64 import b64decode from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import InMemoryStore from langchain_core.documents import Document def create_multi_vector_retriever( vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images ): store = InMemoryStore() id_key = "doc_id" retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key, ) def add_documents(retriever, doc_summaries, doc_contents): doc_ids = [str(uuid.uuid4()) for _ in doc_contents] summary_docs = [ Document(page_content=s, metadata={id_key: doc_ids[i]}) for i, s in enumerate(doc_summaries) ] retriever.vectorstore.add_documents(summary_docs) retriever.docstore.mset(list(zip(doc_ids, doc_contents))) if text_summaries: add_documents(retriever, text_summaries, texts) if table_summaries: add_documents(retriever, table_summaries, tables) if image_summaries: add_documents(retriever, image_summaries, images) return retriever multi_vector_img = Chroma( collection_name="multi_vector_img", embedding_function=OpenAIEmbeddings() ) retriever_multi_vector_img = create_multi_vector_retriever( multi_vector_img, text_summaries, texts, table_summaries, tables, image_summaries, img_base64_list, ) query = "What percentage of CPI is dedicated to Housing, and how does it compare to the combined percentage of Medical Care, Apparel, and Other Goods and Services?" suffix_for_images = " Include any pie charts, graphs, or tables." docs = retriever_multi_vector_img.get_relevant_documents(query + suffix_for_images) from IPython.display import HTML, display def plt_img_base64(img_base64): image_html = f'<img src="data:image/jpeg;base64,{img_base64}" />' display(HTML(image_html)) plt_img_base64(docs[1]) multi_vector_text = Chroma( collection_name="multi_vector_text", embedding_function=OpenAIEmbeddings() ) retriever_multi_vector_img_summary = create_multi_vector_retriever( multi_vector_text, text_summaries, texts, table_summaries, tables, image_summaries, image_summaries, ) from langchain_experimental.open_clip import OpenCLIPEmbeddings multimodal_embd = Chroma( collection_name="multimodal_embd", embedding_function=
OpenCLIPEmbeddings()
langchain_experimental.open_clip.OpenCLIPEmbeddings
from langchain.chains import HypotheticalDocumentEmbedder, LLMChain from langchain.prompts import PromptTemplate from langchain_openai import OpenAI, OpenAIEmbeddings base_embeddings = OpenAIEmbeddings() llm = OpenAI() embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search") result = embeddings.embed_query("Where is the Taj Mahal?") multi_llm =
OpenAI(n=4, best_of=4)
langchain_openai.OpenAI
from ragatouille import RAGPretrainedModel RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0") import requests def get_wikipedia_page(title: str): """ Retrieve the full text content of a Wikipedia page. :param title: str - Title of the Wikipedia page. :return: str - Full text content of the page as raw string. """ URL = "https://en.wikipedia.org/w/api.php" params = { "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, } headers = {"User-Agent": "RAGatouille_tutorial/0.0.1 ([email protected])"} response = requests.get(URL, params=params, headers=headers) data = response.json() page = next(iter(data["query"]["pages"].values())) return page["extract"] if "extract" in page else None full_document = get_wikipedia_page("Hayao_Miyazaki") RAG.index( collection=[full_document], index_name="Miyazaki-123", max_document_length=180, split_documents=True, ) results = RAG.search(query="What animation studio did Miyazaki found?", k=3) results retriever = RAG.as_langchain_retriever(k=3) retriever.invoke("What animation studio did Miyazaki found?") from langchain.chains import create_retrieval_chain from langchain.chains.combine_documents import create_stuff_documents_chain from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI prompt = ChatPromptTemplate.from_template( """Answer the following question based only on the provided context: <context> {context} </context> Question: {input}""" ) llm = ChatOpenAI() document_chain = create_stuff_documents_chain(llm, prompt) retrieval_chain =
create_retrieval_chain(retriever, document_chain)
langchain.chains.create_retrieval_chain
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic') import os import boto3 comprehend_client = boto3.client("comprehend", region_name="us-east-1") from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain comprehend_moderation = AmazonComprehendModerationChain( client=comprehend_client, verbose=True, # optional ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( ModerationPiiError, ) template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comprehend_moderation | {"input": (lambda x: x["output"]) | llm} | comprehend_moderation ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?" } ) except ModerationPiiError as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import ( BaseModerationConfig, ModerationPiiConfig, ModerationPromptSafetyConfig, ModerationToxicityConfig, ) pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5) moderation_config = BaseModerationConfig( filters=[pii_config, toxicity_config, prompt_safety_config] ) comp_moderation_with_config = AmazonComprehendModerationChain( moderation_config=moderation_config, # specify the configuration client=comprehend_client, # optionally pass the Boto3 Client verbose=True, ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comp_moderation_with_config | {"input": (lambda x: x["output"]) | llm} | comp_moderation_with_config ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?" 
} ) except Exception as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler class MyModCallback(BaseModerationCallbackHandler): async def on_after_pii(self, output_beacon, unique_id): import json moderation_type = output_beacon["moderation_type"] chain_id = output_beacon["moderation_chain_id"] with open(f"output-{moderation_type}-{chain_id}.json", "w") as file: data = {"beacon_data": output_beacon, "unique_id": unique_id} json.dump(data, file) """ async def on_after_toxicity(self, output_beacon, unique_id): pass async def on_after_prompt_safety(self, output_beacon, unique_id): pass """ my_callback = MyModCallback() pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config =
ModerationToxicityConfig(threshold=0.5)
langchain_experimental.comprehend_moderation.ModerationToxicityConfig
get_ipython().run_line_magic('pip', 'install --upgrade --quiet amadeus > /dev/null') import os os.environ["AMADEUS_CLIENT_ID"] = "CLIENT_ID" os.environ["AMADEUS_CLIENT_SECRET"] = "CLIENT_SECRET" os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY" from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit toolkit = AmadeusToolkit() tools = toolkit.get_tools() from langchain_community.llms import HuggingFaceHub os.environ["HUGGINGFACEHUB_API_TOKEN"] = "YOUR_HF_API_TOKEN" llm = HuggingFaceHub( repo_id="tiiuae/falcon-7b-instruct", model_kwargs={"temperature": 0.5, "max_length": 64}, ) toolkit_hf = AmadeusToolkit(llm=llm) from langchain import hub from langchain.agents import AgentExecutor, create_react_agent from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser from langchain.tools.render import render_text_description_and_args from langchain_openai import ChatOpenAI llm = ChatOpenAI(temperature=0) prompt =
hub.pull("hwchase17/react-json")
langchain.hub.pull
get_ipython().run_line_magic('pip', 'install --upgrade --quiet clearml') get_ipython().run_line_magic('pip', 'install --upgrade --quiet pandas') get_ipython().run_line_magic('pip', 'install --upgrade --quiet textstat') get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy') get_ipython().system('python -m spacy download en_core_web_sm') import os os.environ["CLEARML_API_ACCESS_KEY"] = "" os.environ["CLEARML_API_SECRET_KEY"] = "" os.environ["OPENAI_API_KEY"] = "" os.environ["SERPAPI_API_KEY"] = "" from langchain.callbacks import ClearMLCallbackHandler from langchain.callbacks import StdOutCallbackHandler from langchain_openai import OpenAI clearml_callback = ClearMLCallbackHandler( task_type="inference", project_name="langchain_callback_demo", task_name="llm", tags=["test"], visualize=True, complexity_metrics=True, stream_logs=True, ) callbacks = [StdOutCallbackHandler(), clearml_callback] llm =
OpenAI(temperature=0, callbacks=callbacks)
langchain_openai.OpenAI
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymysql') get_ipython().system('pip install sqlalchemy') get_ipython().system('pip install langchain') from langchain.chains import RetrievalQA from langchain_community.document_loaders import ( DirectoryLoader, UnstructuredMarkdownLoader, ) from langchain_community.vectorstores.apache_doris import ( ApacheDoris, ApacheDorisSettings, ) from langchain_openai import OpenAI, OpenAIEmbeddings from langchain_text_splitters import TokenTextSplitter update_vectordb = False loader = DirectoryLoader( "./docs", glob="**/*.md", loader_cls=UnstructuredMarkdownLoader ) documents = loader.load() text_splitter = TokenTextSplitter(chunk_size=400, chunk_overlap=50) split_docs = text_splitter.split_documents(documents) update_vectordb = True def gen_apache_doris(update_vectordb, embeddings, settings): if update_vectordb: docsearch = ApacheDoris.from_documents(split_docs, embeddings, config=settings) else: docsearch = ApacheDoris(embeddings, settings) return docsearch import os from getpass import getpass os.environ["OPENAI_API_KEY"] = getpass() update_vectordb = True embeddings = OpenAIEmbeddings() settings =
ApacheDorisSettings()
langchain_community.vectorstores.apache_doris.ApacheDorisSettings
import os from langchain.chains import ConversationalRetrievalChain from langchain_community.vectorstores import Vectara from langchain_openai import OpenAI from langchain_community.document_loaders import TextLoader loader =
TextLoader("state_of_the_union.txt")
langchain_community.document_loaders.TextLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai') get_ipython().run_line_magic('pip', 'install --upgrade --quiet psycopg2-binary') get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken') YBUSER = "[SANDBOX USER]" YBPASSWORD = "[SANDBOX PASSWORD]" YBDATABASE = "[SANDBOX_DATABASE]" YBHOST = "trialsandbox.sandbox.aws.yellowbrickcloud.com" OPENAI_API_KEY = "[OPENAI API KEY]" import os import pathlib import re import sys import urllib.parse as urlparse from getpass import getpass import psycopg2 from IPython.display import Markdown, display from langchain.chains import LLMChain, RetrievalQAWithSourcesChain from langchain.docstore.document import Document from langchain_community.vectorstores import Yellowbrick from langchain_openai import ChatOpenAI, OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter yellowbrick_connection_string = ( f"postgres://{urlparse.quote(YBUSER)}:{YBPASSWORD}@{YBHOST}:5432/{YBDATABASE}" ) YB_DOC_DATABASE = "sample_data" YB_DOC_TABLE = "yellowbrick_documentation" embedding_table = "my_embeddings" os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) system_template = """If you don't know the answer, make up your best guess.""" messages = [
SystemMessagePromptTemplate.from_template(system_template)
langchain.prompts.chat.SystemMessagePromptTemplate.from_template
from langchain_community.embeddings import FakeEmbeddings from langchain_community.vectorstores import Vectara from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnableLambda, RunnablePassthrough vectara =
Vectara.from_files(["state_of_the_union.txt"])
langchain_community.vectorstores.Vectara.from_files
get_ipython().run_line_magic('pip', 'install --upgrade --quiet GitPython') from git import Repo repo = Repo.clone_from( "https://github.com/langchain-ai/langchain", to_path="./example_data/test_repo1" ) branch = repo.head.reference from langchain_community.document_loaders import GitLoader loader =
GitLoader(repo_path="./example_data/test_repo1/", branch=branch)
langchain_community.document_loaders.GitLoader
from langchain_core.messages import ( AIMessage, BaseMessage, FunctionMessage, HumanMessage, SystemMessage, ToolMessage, ) from langchain_core.messages import ( AIMessageChunk, FunctionMessageChunk, HumanMessageChunk, SystemMessageChunk, ToolMessageChunk, ) AIMessageChunk(content="Hello") + AIMessageChunk(content=" World!") from typing import Any, AsyncIterator, Dict, Iterator, List, Optional from langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain_core.language_models import BaseChatModel, SimpleChatModel from langchain_core.messages import AIMessageChunk, BaseMessage, HumanMessage from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult from langchain_core.runnables import run_in_executor class CustomChatModelAdvanced(BaseChatModel): """A custom chat model that echoes the first `n` characters of the input. When contributing an implementation to LangChain, carefully document the model including the initialization parameters, include an example of how to initialize the model and include any relevant links to the underlying models documentation or API. Example: .. code-block:: python model = CustomChatModel(n=2) result = model.invoke([HumanMessage(content="hello")]) result = model.batch([[HumanMessage(content="hello")], [HumanMessage(content="world")]]) """ n: int """The number of characters from the last message of the prompt to be echoed.""" def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Override the _generate method to implement the chat model logic. This can be a call to an API, a call to a local model, or any other implementation that generates a response to the input prompt. Args: messages: the prompt composed of a list of messages. stop: a list of strings on which the model should stop generating. If generation stops due to a stop token, the stop token itself SHOULD BE INCLUDED as part of the output. This is not enforced across models right now, but it's a good practice to follow since it makes it much easier to parse the output of the model downstream and understand why generation stopped. run_manager: A run manager with callbacks for the LLM. """ last_message = messages[-1] tokens = last_message.content[: self.n] message = AIMessage(content=tokens) generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: """Stream the output of the model. This method should be implemented if the model can generate output in a streaming fashion. If the model does not support streaming, do not implement it. In that case streaming requests will be automatically handled by the _generate method. Args: messages: the prompt composed of a list of messages. stop: a list of strings on which the model should stop generating. If generation stops due to a stop token, the stop token itself SHOULD BE INCLUDED as part of the output. This is not enforced across models right now, but it's a good practice to follow since it makes it much easier to parse the output of the model downstream and understand why generation stopped. run_manager: A run manager with callbacks for the LLM. 
""" last_message = messages[-1] tokens = last_message.content[: self.n] for token in tokens: chunk = ChatGenerationChunk(message=
AIMessageChunk(content=token)
langchain_core.messages.AIMessageChunk
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent from langchain.chains import LLMChain from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory from langchain.prompts import PromptTemplate from langchain_community.utilities import GoogleSearchAPIWrapper from langchain_openai import OpenAI template = """This is a conversation between a human and a bot: {chat_history} Write a summary of the conversation for {input}: """ prompt = PromptTemplate(input_variables=["input", "chat_history"], template=template) memory = ConversationBufferMemory(memory_key="chat_history") readonlymemory = ReadOnlySharedMemory(memory=memory) summary_chain = LLMChain( llm=OpenAI(), prompt=prompt, verbose=True, memory=readonlymemory, # use the read-only memory to prevent the tool from modifying the memory ) search = GoogleSearchAPIWrapper() tools = [ Tool( name="Search", func=search.run, description="useful for when you need to answer questions about current events", ), Tool( name="Summary", func=summary_chain.run, description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.", ), ] prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:""" suffix = """Begin! {chat_history} Question: {input} {agent_scratchpad}""" prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix, input_variables=["input", "chat_history", "agent_scratchpad"], ) llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt) agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True) agent_chain = AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, verbose=True, memory=memory ) agent_chain.run(input="What is ChatGPT?") agent_chain.run(input="Who developed it?") agent_chain.run( input="Thanks. Summarize the conversation for my 5-year-old daughter." ) print(agent_chain.memory.buffer) template = """This is a conversation between a human and a bot: {chat_history} Write a summary of the conversation for {input}: """ prompt = PromptTemplate(input_variables=["input", "chat_history"], template=template) memory = ConversationBufferMemory(memory_key="chat_history") summary_chain = LLMChain( llm=OpenAI(), prompt=prompt, verbose=True, memory=memory, # <--- this is the only change ) search = GoogleSearchAPIWrapper() tools = [ Tool( name="Search", func=search.run, description="useful for when you need to answer questions about current events", ), Tool( name="Summary", func=summary_chain.run, description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.", ), ] prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:""" suffix = """Begin! {chat_history} Question: {input} {agent_scratchpad}""" prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix, input_variables=["input", "chat_history", "agent_scratchpad"], ) llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt) agent =
ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
langchain.agents.ZeroShotAgent
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lark chromadb') from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_openai import OpenAIEmbeddings docs = [ Document( page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose", metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"}, ), Document( page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...", metadata={"year": 2010, "director": "Christopher Nolan", "rating": 8.2}, ), Document( page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea", metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6}, ), Document( page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them", metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3}, ), Document( page_content="Toys come alive and have a blast doing so", metadata={"year": 1995, "genre": "animated"}, ), Document( page_content="Three men walk into the Zone, three men walk out of the Zone", metadata={ "year": 1979, "director": "Andrei Tarkovsky", "genre": "thriller", "rating": 9.9, }, ), ] vectorstore = Chroma.from_documents(docs, OpenAIEmbeddings()) from langchain.chains.query_constructor.base import AttributeInfo from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain_openai import ChatOpenAI metadata_field_info = [ AttributeInfo( name="genre", description="The genre of the movie. One of ['science fiction', 'comedy', 'drama', 'thriller', 'romance', 'action', 'animated']", type="string", ), AttributeInfo( name="year", description="The year the movie was released", type="integer", ), AttributeInfo( name="director", description="The name of the movie director", type="string", ), AttributeInfo( name="rating", description="A 1-10 rating for the movie", type="float" ), ] document_content_description = "Brief summary of a movie" llm = ChatOpenAI(temperature=0) retriever = SelfQueryRetriever.from_llm( llm, vectorstore, document_content_description, metadata_field_info, ) retriever.invoke("I want to watch a movie rated higher than 8.5") retriever.invoke("Has Greta Gerwig directed any movies about women") retriever.invoke("What's a highly rated (above 8.5) science fiction film?") retriever.invoke( "What's a movie after 1990 but before 2005 that's all about toys, and preferably is animated" ) retriever = SelfQueryRetriever.from_llm( llm, vectorstore, document_content_description, metadata_field_info, enable_limit=True, ) retriever.invoke("What are two movies about dinosaurs") from langchain.chains.query_constructor.base import ( StructuredQueryOutputParser, get_query_constructor_prompt, ) prompt = get_query_constructor_prompt( document_content_description, metadata_field_info, ) output_parser = StructuredQueryOutputParser.from_components() query_constructor = prompt | llm | output_parser print(prompt.format(query="dummy question")) query_constructor.invoke( { "query": "What are some sci-fi movies from the 90's directed by Luc Besson about taxi drivers" } ) from langchain.retrievers.self_query.chroma import ChromaTranslator retriever = SelfQueryRetriever( query_constructor=query_constructor, vectorstore=vectorstore, structured_query_translator=
ChromaTranslator()
langchain.retrievers.self_query.chroma.ChromaTranslator
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub gpt4all chromadb') from langchain_community.document_loaders import WebBaseLoader from langchain_text_splitters import RecursiveCharacterTextSplitter loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") data = loader.load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) all_splits = text_splitter.split_documents(data) from langchain_community.embeddings import GPT4AllEmbeddings from langchain_community.vectorstores import Chroma vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings()) question = "What are the approaches to Task Decomposition?" docs = vectorstore.similarity_search(question) len(docs) docs[0] get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python') get_ipython().system(' CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 /Users/rlm/miniforge3/envs/llama/bin/pip install -U llama-cpp-python --no-cache-dir') from langchain_community.llms import LlamaCpp n_gpu_layers = 1 # Metal set to 1 is enough. n_batch = 512 # Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip. llm = LlamaCpp( model_path="/Users/rlm/Desktop/Code/llama.cpp/models/llama-2-13b-chat.ggufv3.q4_0.bin", n_gpu_layers=n_gpu_layers, n_batch=n_batch, n_ctx=2048, f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls verbose=True, ) llm.invoke("Simulate a rap battle between Stephen Colbert and John Oliver") from langchain_community.llms import GPT4All gpt4all = GPT4All( model="/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin", max_tokens=2048, ) from langchain_community.llms.llamafile import Llamafile llamafile = Llamafile() llamafile.invoke("Here is my grandmother's beloved recipe for spaghetti and meatballs:") from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import PromptTemplate prompt = PromptTemplate.from_template( "Summarize the main themes in these retrieved docs: {docs}" ) def format_docs(docs): return "\n\n".join(doc.page_content for doc in docs) chain = {"docs": format_docs} | prompt | llm | StrOutputParser() question = "What are the approaches to Task Decomposition?" docs = vectorstore.similarity_search(question) chain.invoke(docs) from langchain import hub rag_prompt = hub.pull("rlm/rag-prompt") rag_prompt.messages from langchain_core.runnables import RunnablePassthrough, RunnablePick chain = ( RunnablePassthrough.assign(context=RunnablePick("context") | format_docs) | rag_prompt | llm | StrOutputParser() ) chain.invoke({"context": docs, "question": question}) rag_prompt_llama = hub.pull("rlm/rag-prompt-llama") rag_prompt_llama.messages chain = ( RunnablePassthrough.assign(context=
RunnablePick("context")
langchain_core.runnables.RunnablePick
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub gpt4all chromadb') from langchain_community.document_loaders import WebBaseLoader from langchain_text_splitters import RecursiveCharacterTextSplitter loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") data = loader.load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) all_splits = text_splitter.split_documents(data) from langchain_community.embeddings import GPT4AllEmbeddings from langchain_community.vectorstores import Chroma vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings()) question = "What are the approaches to Task Decomposition?" docs = vectorstore.similarity_search(question) len(docs) docs[0] get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python') get_ipython().system(' CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 /Users/rlm/miniforge3/envs/llama/bin/pip install -U llama-cpp-python --no-cache-dir') from langchain_community.llms import LlamaCpp n_gpu_layers = 1 # Metal set to 1 is enough. n_batch = 512 # Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip. llm = LlamaCpp( model_path="/Users/rlm/Desktop/Code/llama.cpp/models/llama-2-13b-chat.ggufv3.q4_0.bin", n_gpu_layers=n_gpu_layers, n_batch=n_batch, n_ctx=2048, f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls verbose=True, ) llm.invoke("Simulate a rap battle between Stephen Colbert and John Oliver") from langchain_community.llms import GPT4All gpt4all = GPT4All( model="/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin", max_tokens=2048, ) from langchain_community.llms.llamafile import Llamafile llamafile = Llamafile() llamafile.invoke("Here is my grandmother's beloved recipe for spaghetti and meatballs:") from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import PromptTemplate prompt = PromptTemplate.from_template( "Summarize the main themes in these retrieved docs: {docs}" ) def format_docs(docs): return "\n\n".join(doc.page_content for doc in docs) chain = {"docs": format_docs} | prompt | llm | StrOutputParser() question = "What are the approaches to Task Decomposition?" docs = vectorstore.similarity_search(question) chain.invoke(docs) from langchain import hub rag_prompt = hub.pull("rlm/rag-prompt") rag_prompt.messages from langchain_core.runnables import RunnablePassthrough, RunnablePick chain = ( RunnablePassthrough.assign(context=RunnablePick("context") | format_docs) | rag_prompt | llm | StrOutputParser() ) chain.invoke({"context": docs, "question": question}) rag_prompt_llama = hub.pull("rlm/rag-prompt-llama") rag_prompt_llama.messages chain = ( RunnablePassthrough.assign(context=RunnablePick("context") | format_docs) | rag_prompt_llama | llm | StrOutputParser() ) chain.invoke({"context": docs, "question": question}) retriever = vectorstore.as_retriever() qa_chain = ( {"context": retriever | format_docs, "question":
RunnablePassthrough()
langchain_core.runnables.RunnablePassthrough
get_ipython().run_line_magic('pip', 'install --upgrade --quiet text-generation transformers google-search-results numexpr langchainhub sentencepiece jinja2') import os from langchain_community.llms import HuggingFaceTextGenInference ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>" HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN") llm = HuggingFaceTextGenInference( inference_server_url=ENDPOINT_URL, max_new_tokens=512, top_k=50, temperature=0.1, repetition_penalty=1.03, server_kwargs={ "headers": { "Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json", } }, ) from langchain_community.llms import HuggingFaceEndpoint ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>" llm = HuggingFaceEndpoint( endpoint_url=ENDPOINT_URL, task="text-generation", model_kwargs={ "max_new_tokens": 512, "top_k": 50, "temperature": 0.1, "repetition_penalty": 1.03, }, ) from langchain_community.llms import HuggingFaceHub llm = HuggingFaceHub( repo_id="HuggingFaceH4/zephyr-7b-beta", task="text-generation", model_kwargs={ "max_new_tokens": 512, "top_k": 30, "temperature": 0.1, "repetition_penalty": 1.03, }, ) from langchain.schema import ( HumanMessage, SystemMessage, ) from langchain_community.chat_models.huggingface import ChatHuggingFace messages = [ SystemMessage(content="You're a helpful assistant"), HumanMessage( content="What happens when an unstoppable force meets an immovable object?" ), ] chat_model = ChatHuggingFace(llm=llm) chat_model.model_id chat_model._to_chat_prompt(messages) res = chat_model.invoke(messages) print(res.content) from langchain import hub from langchain.agents import AgentExecutor, load_tools from langchain.agents.format_scratchpad import format_log_to_str from langchain.agents.output_parsers import ( ReActJsonSingleInputOutputParser, ) from langchain.tools.render import render_text_description from langchain_community.utilities import SerpAPIWrapper tools = load_tools(["serpapi", "llm-math"], llm=llm) prompt =
hub.pull("hwchase17/react-json")
langchain.hub.pull
from langchain_community.utils.openai_functions import ( convert_pydantic_to_openai_function, ) from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field, validator from langchain_openai import ChatOpenAI class Joke(BaseModel): """Joke to tell user.""" setup: str =
Field(description="question to set up a joke")
langchain_core.pydantic_v1.Field
get_ipython().system('pip3 install clickhouse-sqlalchemy InstructorEmbedding sentence_transformers openai langchain-experimental') import getpass from os import environ from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.utilities import SQLDatabase from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain from langchain_openai import OpenAI from sqlalchemy import MetaData, create_engine MYSCALE_HOST = "msc-4a9e710a.us-east-1.aws.staging.myscale.cloud" MYSCALE_PORT = 443 MYSCALE_USER = "chatdata" MYSCALE_PASSWORD = "myscale_rocks" OPENAI_API_KEY = getpass.getpass("OpenAI API Key:") engine = create_engine( f"clickhouse://{MYSCALE_USER}:{MYSCALE_PASSWORD}@{MYSCALE_HOST}:{MYSCALE_PORT}/default?protocol=https" ) metadata = MetaData(bind=engine) environ["OPENAI_API_KEY"] = OPENAI_API_KEY from langchain_community.embeddings import HuggingFaceInstructEmbeddings from langchain_experimental.sql.vector_sql import VectorSQLOutputParser output_parser = VectorSQLOutputParser.from_embeddings( model=HuggingFaceInstructEmbeddings( model_name="hkunlp/instructor-xl", model_kwargs={"device": "cpu"} ) ) from langchain.callbacks import StdOutCallbackHandler from langchain_community.utilities.sql_database import SQLDatabase from langchain_experimental.sql.prompt import MYSCALE_PROMPT from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain from langchain_openai import OpenAI chain = VectorSQLDatabaseChain( llm_chain=LLMChain( llm=OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0), prompt=MYSCALE_PROMPT, ), top_k=10, return_direct=True, sql_cmd_parser=output_parser, database=SQLDatabase(engine, None, metadata), ) import pandas as pd pd.DataFrame( chain.run( "Please give me 10 papers to ask what is PageRank?", callbacks=[StdOutCallbackHandler()], ) ) from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain from langchain_experimental.retrievers.vector_sql_database import ( VectorSQLDatabaseChainRetriever, ) from langchain_experimental.sql.prompt import MYSCALE_PROMPT from langchain_experimental.sql.vector_sql import ( VectorSQLDatabaseChain, VectorSQLRetrieveAllOutputParser, ) from langchain_openai import ChatOpenAI output_parser_retrieve_all = VectorSQLRetrieveAllOutputParser.from_embeddings( output_parser.model ) chain = VectorSQLDatabaseChain.from_llm( llm=OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0), prompt=MYSCALE_PROMPT, top_k=10, return_direct=True, db=
SQLDatabase(engine, None, metadata)
langchain_community.utilities.sql_database.SQLDatabase
from langchain_community.vectorstores import Bagel texts = ["hello bagel", "hello langchain", "I love salad", "my car", "a dog"] cluster = Bagel.from_texts(cluster_name="testing", texts=texts) cluster.similarity_search("bagel", k=3) cluster.similarity_search_with_score("bagel", k=3) cluster.delete_cluster() from langchain_community.document_loaders import TextLoader from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents)[:10] cluster = Bagel.from_documents(cluster_name="testing_with_docs", documents=docs) query = "What did the president say about Ketanji Brown Jackson" docs = cluster.similarity_search(query) print(docs[0].page_content[:102]) texts = ["hello bagel", "this is langchain"] cluster = Bagel.from_texts(cluster_name="testing", texts=texts) cluster_data = cluster.get() cluster_data.keys() cluster_data cluster.delete_cluster() texts = ["hello bagel", "this is langchain"] metadatas = [{"source": "notion"}, {"source": "google"}] cluster =
Bagel.from_texts(cluster_name="testing", texts=texts, metadatas=metadatas)
langchain_community.vectorstores.Bagel.from_texts
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"} get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-firestore') PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') from google.colab import auth auth.authenticate_user() get_ipython().system('gcloud services enable firestore.googleapis.com') from langchain_core.documents.base import Document from langchain_google_firestore import FirestoreSaver saver = FirestoreSaver() data = [Document(page_content="Hello, World!")] saver.upsert_documents(data) saver = FirestoreSaver("Collection") saver.upsert_documents(data) doc_ids = ["AnotherCollection/doc_id", "foo/bar"] saver = FirestoreSaver() saver.upsert_documents(documents=data, document_ids=doc_ids) from langchain_google_firestore import FirestoreLoader loader_collection = FirestoreLoader("Collection") loader_subcollection =
FirestoreLoader("Collection/doc/SubCollection")
langchain_google_firestore.FirestoreLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet cohere') get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss') get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss-cpu') import getpass import os os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:") def pretty_print_docs(docs): print( f"\n{'-' * 100}\n".join( [f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)] ) ) from langchain_community.document_loaders import TextLoader from langchain_community.embeddings import CohereEmbeddings from langchain_community.vectorstores import FAISS from langchain_text_splitters import RecursiveCharacterTextSplitter documents = TextLoader("../../modules/state_of_the_union.txt").load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100) texts = text_splitter.split_documents(documents) retriever = FAISS.from_documents(texts, CohereEmbeddings()).as_retriever( search_kwargs={"k": 20} ) query = "What did the president say about Ketanji Brown Jackson" docs = retriever.get_relevant_documents(query) pretty_print_docs(docs) from langchain.retrievers import ContextualCompressionRetriever from langchain.retrievers.document_compressors import CohereRerank from langchain_community.llms import Cohere llm = Cohere(temperature=0) compressor = CohereRerank() compression_retriever = ContextualCompressionRetriever( base_compressor=compressor, base_retriever=retriever ) compressed_docs = compression_retriever.get_relevant_documents( "What did the president say about Ketanji Jackson Brown" ) pretty_print_docs(compressed_docs) from langchain.chains import RetrievalQA chain = RetrievalQA.from_chain_type( llm=
Cohere(temperature=0)
langchain_community.llms.Cohere
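The prompt truncates inside `RetrievalQA.from_chain_type`; a plausible sketch of the finished call, wiring in the `compression_retriever` built just above (the `retriever=` argument is an assumption, since the source cuts off before it):
chain = RetrievalQA.from_chain_type(
    llm=Cohere(temperature=0),
    retriever=compression_retriever,  # rerank with CohereRerank before answering
)
chain.invoke({"query": "What did the president say about Ketanji Brown Jackson"})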
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain sentence_transformers') from langchain_community.embeddings import HuggingFaceEmbeddings embeddings = HuggingFaceEmbeddings() text = "This is a test document." query_result = embeddings.embed_query(text) query_result[:3] doc_result = embeddings.embed_documents([text]) import getpass inference_api_key = getpass.getpass("Enter your HF Inference API Key:\n\n") from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings embeddings = HuggingFaceInferenceAPIEmbeddings( api_key=inference_api_key, model_name="sentence-transformers/all-MiniLM-l6-v2" ) query_result = embeddings.embed_query(text) query_result[:3] get_ipython().system('pip install huggingface_hub') from langchain_community.embeddings import HuggingFaceHubEmbeddings embeddings =
HuggingFaceHubEmbeddings()
langchain_community.embeddings.HuggingFaceHubEmbeddings
from langchain_community.utils.openai_functions import ( convert_pydantic_to_openai_function, ) from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field, validator from langchain_openai import ChatOpenAI class Joke(BaseModel): """Joke to tell user.""" setup: str = Field(description="question to set up a joke") punchline: str = Field(description="answer to resolve the joke") openai_functions = [convert_pydantic_to_openai_function(Joke)] model = ChatOpenAI(temperature=0) prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful assistant"), ("user", "{input}")] ) from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser parser = JsonOutputFunctionsParser() chain = prompt | model.bind(functions=openai_functions) | parser chain.invoke({"input": "tell me a joke"}) for s in chain.stream({"input": "tell me a joke"}): print(s) from typing import List from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser class Jokes(BaseModel): """Jokes to tell user.""" joke: List[Joke] funniness_level: int parser =
JsonKeyOutputFunctionsParser(key_name="joke")
langchain.output_parsers.openai_functions.JsonKeyOutputFunctionsParser
from langchain.agents import AgentType, initialize_agent from langchain_community.agent_toolkits.nasa.toolkit import NasaToolkit from langchain_community.utilities.nasa import NasaAPIWrapper from langchain_openai import OpenAI llm = OpenAI(temperature=0, openai_api_key="") nasa =
NasaAPIWrapper()
langchain_community.utilities.nasa.NasaAPIWrapper
get_ipython().run_line_magic('pip', 'install -qU langchain-text-splitters') from langchain_text_splitters import HTMLHeaderTextSplitter html_string = """ <!DOCTYPE html> <html> <body> <div> <h1>Foo</h1> <p>Some intro text about Foo.</p> <div> <h2>Bar main section</h2> <p>Some intro text about Bar.</p> <h3>Bar subsection 1</h3> <p>Some text about the first subtopic of Bar.</p> <h3>Bar subsection 2</h3> <p>Some text about the second subtopic of Bar.</p> </div> <div> <h2>Baz</h2> <p>Some text about Baz</p> </div> <br> <p>Some concluding text about Foo</p> </div> </body> </html> """ headers_to_split_on = [ ("h1", "Header 1"), ("h2", "Header 2"), ("h3", "Header 3"), ] html_splitter = HTMLHeaderTextSplitter(headers_to_split_on=headers_to_split_on) html_header_splits = html_splitter.split_text(html_string) html_header_splits from langchain_text_splitters import RecursiveCharacterTextSplitter url = "https://plato.stanford.edu/entries/goedel/" headers_to_split_on = [ ("h1", "Header 1"), ("h2", "Header 2"), ("h3", "Header 3"), ("h4", "Header 4"), ] html_splitter =
HTMLHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
langchain_text_splitters.HTMLHeaderTextSplitter
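The second half of this prompt splits a live web page by headers and then re-chunks by characters; a short sketch of how those pieces typically combine. `split_text_from_url` fetches and splits the page; the chunk sizes are illustrative:
html_header_splits = html_splitter.split_text_from_url(url)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=30)
splits = text_splitter.split_documents(html_header_splits)
print(splits[0].metadata)  # each chunk keeps its header hierarchy as metadata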
get_ipython().run_line_magic('pip', "install --upgrade --quiet langchain-openai 'deeplake[enterprise]' tiktoken") from langchain_community.vectorstores import DeepLake from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") activeloop_token = getpass.getpass("activeloop token:") embeddings = OpenAIEmbeddings() from langchain_community.document_loaders import TextLoader loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, overwrite=True) db.add_documents(docs) query = "What did the president say about Ketanji Brown Jackson" docs = db.similarity_search(query) print(docs[0].page_content) db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, read_only=True) docs = db.similarity_search(query) from langchain.chains import RetrievalQA from langchain_openai import OpenAIChat qa = RetrievalQA.from_chain_type( llm=OpenAIChat(model="gpt-3.5-turbo"), chain_type="stuff", retriever=db.as_retriever(), ) query = "What did the president say about Ketanji Brown Jackson" qa.run(query) import random for d in docs: d.metadata["year"] = random.randint(2012, 2014) db = DeepLake.from_documents( docs, embeddings, dataset_path="./my_deeplake/", overwrite=True ) db.similarity_search( "What did the president say about Ketanji Brown Jackson", filter={"metadata": {"year": 2013}}, ) db.similarity_search( "What did the president say about Ketanji Brown Jackson?", distance_metric="cos" ) db.max_marginal_relevance_search( "What did the president say about Ketanji Brown Jackson?" ) db.delete_dataset() DeepLake.force_delete_by_path("./my_deeplake") os.environ["ACTIVELOOP_TOKEN"] = activeloop_token username = "<USERNAME_OR_ORG>" # your username on app.activeloop.ai dataset_path = f"hub://{username}/langchain_testing_python" # could be also ./local/path (much faster locally), s3://bucket/path/to/dataset, gcs://path/to/dataset, etc. docs = text_splitter.split_documents(documents) embedding = OpenAIEmbeddings() db =
DeepLake(dataset_path=dataset_path, embedding=embeddings, overwrite=True)
langchain_community.vectorstores.DeepLake
get_ipython().run_line_magic('pip', 'install --upgrade --quiet supabase') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") os.environ["SUPABASE_URL"] = getpass.getpass("Supabase URL:") os.environ["SUPABASE_SERVICE_KEY"] = getpass.getpass("Supabase Service Key:") from dotenv import load_dotenv load_dotenv() import os from langchain_community.vectorstores import SupabaseVectorStore from langchain_openai import OpenAIEmbeddings from supabase.client import Client, create_client supabase_url = os.environ.get("SUPABASE_URL") supabase_key = os.environ.get("SUPABASE_SERVICE_KEY") supabase: Client = create_client(supabase_url, supabase_key) embeddings = OpenAIEmbeddings() from langchain_community.document_loaders import TextLoader from langchain_text_splitters import CharacterTextSplitter loader =
TextLoader("../../modules/state_of_the_union.txt")
langchain_community.document_loaders.TextLoader
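A sketch of where this truncated Supabase prompt is likely headed: load and split the file, then build the store. The "documents" table and "match_documents" query names are assumptions about the Supabase setup, not confirmed by the source:
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
vector_store = SupabaseVectorStore.from_documents(
    docs,
    embeddings,
    client=supabase,
    table_name="documents",        # assumes a pgvector-backed table exists
    query_name="match_documents",  # assumes the matching SQL function exists
)
vector_store.similarity_search("What did the president say about Ketanji Brown Jackson")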
import os os.environ["BING_SUBSCRIPTION_KEY"] = "<key>" os.environ["BING_SEARCH_URL"] = "https://api.bing.microsoft.com/v7.0/search" from langchain_community.utilities import BingSearchAPIWrapper search = BingSearchAPIWrapper() search.run("python") search = BingSearchAPIWrapper(k=1) search.run("python") search =
BingSearchAPIWrapper()
langchain_community.utilities.BingSearchAPIWrapper
import boto3 dynamodb = boto3.resource("dynamodb") table = dynamodb.create_table( TableName="SessionTable", KeySchema=[{"AttributeName": "SessionId", "KeyType": "HASH"}], AttributeDefinitions=[{"AttributeName": "SessionId", "AttributeType": "S"}], BillingMode="PAY_PER_REQUEST", ) table.meta.client.get_waiter("table_exists").wait(TableName="SessionTable") print(table.item_count) from langchain_community.chat_message_histories import DynamoDBChatMessageHistory history = DynamoDBChatMessageHistory(table_name="SessionTable", session_id="0") history.add_user_message("hi!") history.add_ai_message("whats up?") history.messages from langchain_community.chat_message_histories import DynamoDBChatMessageHistory history = DynamoDBChatMessageHistory( table_name="SessionTable", session_id="0", endpoint_url="http://localhost.localstack.cloud:4566", ) from langchain_community.chat_message_histories import DynamoDBChatMessageHistory composite_table = dynamodb.create_table( TableName="CompositeTable", KeySchema=[ {"AttributeName": "PK", "KeyType": "HASH"}, {"AttributeName": "SK", "KeyType": "RANGE"}, ], AttributeDefinitions=[ {"AttributeName": "PK", "AttributeType": "S"}, {"AttributeName": "SK", "AttributeType": "S"}, ], BillingMode="PAY_PER_REQUEST", ) composite_table.meta.client.get_waiter("table_exists").wait(TableName="CompositeTable") print(composite_table.item_count) my_key = { "PK": "session_id::0", "SK": "langchain_history", } composite_key_history = DynamoDBChatMessageHistory( table_name="CompositeTable", session_id="0", endpoint_url="http://localhost.localstack.cloud:4566", key=my_key, ) composite_key_history.add_user_message("hello, composite dynamodb table!") composite_key_history.messages from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.runnables.history import RunnableWithMessageHistory from langchain_openai import ChatOpenAI prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a helpful assistant."),
MessagesPlaceholder(variable_name="history")
langchain_core.prompts.MessagesPlaceholder
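The prompt breaks off inside `ChatPromptTemplate.from_messages`; a sketch of the finished template wired to the DynamoDB-backed history from earlier in the same prompt (the "question" input key is an assumption):
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{question}"),
    ]
)
chain = prompt | ChatOpenAI()
chain_with_history = RunnableWithMessageHistory(
    chain,
    # look up (or create) the DynamoDB-backed history for each session id
    lambda session_id: DynamoDBChatMessageHistory(
        table_name="SessionTable", session_id=session_id
    ),
    input_messages_key="question",
    history_messages_key="history",
)
chain_with_history.invoke(
    {"question": "Hi! I'm Bob"},
    config={"configurable": {"session_id": "0"}},
)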
from langchain_community.tools.edenai import ( EdenAiExplicitImageTool, EdenAiObjectDetectionTool, EdenAiParsingIDTool, EdenAiParsingInvoiceTool, EdenAiSpeechToTextTool, EdenAiTextModerationTool, EdenAiTextToSpeechTool, ) from langchain.agents import AgentType, initialize_agent from langchain_community.llms import EdenAI llm = EdenAI( feature="text", provider="openai", params={"temperature": 0.2, "max_tokens": 250} ) tools = [ EdenAiTextModerationTool(providers=["openai"], language="en"),
EdenAiObjectDetectionTool(providers=["google", "api4ai"])
langchain_community.tools.edenai.EdenAiObjectDetectionTool
from langchain.output_parsers import ( OutputFixingParser, PydanticOutputParser, ) from langchain.prompts import ( PromptTemplate, ) from langchain_core.pydantic_v1 import BaseModel, Field from langchain_openai import ChatOpenAI, OpenAI template = """Based on the user question, provide an Action and Action Input for what step should be taken. {format_instructions} Question: {query} Response:""" class Action(BaseModel): action: str = Field(description="action to take") action_input: str = Field(description="input to the action") parser =
PydanticOutputParser(pydantic_object=Action)
langchain.output_parsers.PydanticOutputParser
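A sketch of how the `Action` parser is typically finished and then made robust: feed its format instructions into the prompt, and wrap it in an `OutputFixingParser` that asks an LLM to repair malformed output. The bad-response string is an invented example:
prompt = PromptTemplate(
    template=template,
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
fix_parser = OutputFixingParser.from_llm(parser=parser, llm=ChatOpenAI())
bad_response = '{"action": "search"}'  # missing action_input; plain parsing would raise
fix_parser.parse(bad_response)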
get_ipython().run_line_magic('pip', 'install --upgrade --quiet slack_sdk > /dev/null') get_ipython().run_line_magic('pip', 'install --upgrade --quiet beautifulsoup4 > /dev/null # This is optional but is useful for parsing HTML messages') get_ipython().run_line_magic('pip', 'install --upgrade --quiet python-dotenv > /dev/null # This is for loading environmental variables from a .env file') import dotenv dotenv.load_dotenv() from langchain_community.agent_toolkits import SlackToolkit toolkit =
SlackToolkit()
langchain_community.agent_toolkits.SlackToolkit
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml') from typing import Any from pydantic import BaseModel from unstructured.partition.pdf import partition_pdf path = "/Users/rlm/Desktop/Papers/LLaVA/" raw_pdf_elements = partition_pdf( filename=path + "LLaVA.pdf", extract_images_in_pdf=True, infer_table_structure=True, chunking_strategy="by_title", max_characters=4000, new_after_n_chars=3800, combine_text_under_n_chars=2000, image_output_dir_path=path, ) category_counts = {} for element in raw_pdf_elements: category = str(type(element)) if category in category_counts: category_counts[category] += 1 else: category_counts[category] = 1 unique_categories = set(category_counts.keys()) category_counts class Element(BaseModel): type: str text: Any categorized_elements = [] for element in raw_pdf_elements: if "unstructured.documents.elements.Table" in str(type(element)): categorized_elements.append(Element(type="table", text=str(element))) elif "unstructured.documents.elements.CompositeElement" in str(type(element)): categorized_elements.append(Element(type="text", text=str(element))) table_elements = [e for e in categorized_elements if e.type == "table"] print(len(table_elements)) text_elements = [e for e in categorized_elements if e.type == "text"] print(len(text_elements)) from langchain_community.chat_models import ChatOllama from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate prompt_text = """You are an assistant tasked with summarizing tables and text. \ Give a concise summary of the table or text. Table or text chunk: {element} """ prompt = ChatPromptTemplate.from_template(prompt_text) model =
ChatOllama(model="llama2:13b-chat")
langchain_community.chat_models.ChatOllama
from langchain_community.utils.openai_functions import ( convert_pydantic_to_openai_function, ) from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field, validator from langchain_openai import ChatOpenAI class Joke(BaseModel): """Joke to tell user.""" setup: str = Field(description="question to set up a joke") punchline: str = Field(description="answer to resolve the joke") openai_functions = [convert_pydantic_to_openai_function(Joke)] model = ChatOpenAI(temperature=0) prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful assistant"), ("user", "{input}")] ) from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser parser = JsonOutputFunctionsParser() chain = prompt | model.bind(functions=openai_functions) | parser chain.invoke({"input": "tell me a joke"}) for s in chain.stream({"input": "tell me a joke"}): print(s) from typing import List from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser class Jokes(BaseModel): """Jokes to tell user.""" joke: List[Joke] funniness_level: int parser = JsonKeyOutputFunctionsParser(key_name="joke") openai_functions = [convert_pydantic_to_openai_function(Jokes)] chain = prompt | model.bind(functions=openai_functions) | parser chain.invoke({"input": "tell me two jokes"}) for s in chain.stream({"input": "tell me two jokes"}): print(s) from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser class Joke(BaseModel): """Joke to tell user.""" setup: str =
Field(description="question to set up a joke")
langchain_core.pydantic_v1.Field
get_ipython().run_line_magic('pip', 'install --upgrade --quiet apify-client') from langchain_community.document_loaders import ApifyDatasetLoader from langchain_community.document_loaders.base import Document loader = ApifyDatasetLoader( dataset_id="your-dataset-id", dataset_mapping_function=lambda dataset_item: Document( page_content=dataset_item["text"], metadata={"source": dataset_item["url"]} ), ) data = loader.load() from langchain.docstore.document import Document from langchain.indexes import VectorstoreIndexCreator from langchain_community.document_loaders import ApifyDatasetLoader loader = ApifyDatasetLoader( dataset_id="your-dataset-id", dataset_mapping_function=lambda item: Document( page_content=item["text"] or "", metadata={"source": item["url"]} ), ) index =
VectorstoreIndexCreator()
langchain.indexes.VectorstoreIndexCreator
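A sketch of how the index built from the Apify loader is usually queried; `query_with_sources` returns a dict holding the answer and the source URLs. The query string assumes the dataset contains scraped LangChain documentation:
index = VectorstoreIndexCreator().from_loaders([loader])
result = index.query_with_sources("What is LangChain?")
print(result["answer"])
print(result["sources"])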
get_ipython().run_line_magic('pip', 'install --upgrade --quiet openlm') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai') import os from getpass import getpass if "OPENAI_API_KEY" not in os.environ: print("Enter your OpenAI API key:") os.environ["OPENAI_API_KEY"] = getpass() if "HF_API_TOKEN" not in os.environ: print("Enter your HuggingFace Hub API key:") os.environ["HF_API_TOKEN"] = getpass() from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.llms import OpenLM question = "What is the capital of France?" template = """Question: {question} Answer: Let's think step by step.""" prompt = PromptTemplate.from_template(template) for model in ["text-davinci-003", "huggingface.co/gpt2"]: llm =
OpenLM(model=model)
langchain_community.llms.OpenLM
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-cloud-sql-pg langchain-google-vertexai') from google.colab import auth auth.authenticate_user() PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') get_ipython().system('gcloud services enable sqladmin.googleapis.com') REGION = "us-central1" # @param {type: "string"} INSTANCE = "my-pg-instance" # @param {type: "string"} DATABASE = "my-database" # @param {type: "string"} TABLE_NAME = "vector_store" # @param {type: "string"} from langchain_google_cloud_sql_pg import PostgreSQLEngine engine = await PostgreSQLEngine.afrom_instance( project_id=PROJECT_ID, region=REGION, instance=INSTANCE, database=DATABASE ) from langchain_google_cloud_sql_pg import PostgreSQLEngine await engine.ainit_vectorstore_table( table_name=TABLE_NAME, vector_size=768, # Vector size for VertexAI model(textembedding-gecko@latest) ) get_ipython().system('gcloud services enable aiplatform.googleapis.com') from langchain_google_vertexai import VertexAIEmbeddings embedding = VertexAIEmbeddings( model_name="textembedding-gecko@latest", project=PROJECT_ID ) from langchain_google_cloud_sql_pg import PostgresVectorStore store = await
PostgresVectorStore.create( # Use .create()
langchain_google_cloud_sql_pg.PostgresVectorStore.create
get_ipython().run_line_magic('pip', 'install --upgrade --quiet predictionguard langchain') import os from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.llms import PredictionGuard os.environ["OPENAI_API_KEY"] = "<your OpenAI api key>" os.environ["PREDICTIONGUARD_TOKEN"] = "<your Prediction Guard access token>" pgllm = PredictionGuard(model="OpenAI-text-davinci-003") pgllm("Tell me a joke") template = """Respond to the following query based on the context. Context: EVERY comment, DM + email suggestion has led us to this EXCITING announcement! 🎉 We have officially added TWO new candle subscription box options! 📦 Exclusive Candle Box - $80 Monthly Candle Box - $45 (NEW!) Scent of The Month Box - $28 (NEW!) Head to stories to get ALLL the deets on each box! 👆 BONUS: Save 50% on your first box with code 50OFF! 🎉 Query: {query} Result: """ prompt = PromptTemplate.from_template(template) pgllm(prompt.format(query="What kind of post is this?")) pgllm = PredictionGuard( model="OpenAI-text-davinci-003", output={ "type": "categorical", "categories": ["product announcement", "apology", "relational"], }, ) pgllm(prompt.format(query="What kind of post is this?")) pgllm = PredictionGuard(model="OpenAI-text-davinci-003") template = """Question: {question} Answer: Let's think step by step.""" prompt = PromptTemplate.from_template(template) llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True) question = "What NFL team won the Super Bowl in the year Justin Bieber was born?" llm_chain.predict(question=question) template = """Write a {adjective} poem about {subject}.""" prompt =
PromptTemplate.from_template(template)
langchain.prompts.PromptTemplate.from_template
get_ipython().run_cell_magic('writefile', 'whatsapp_chat.txt', "[8/15/23, 9:12:33 AM] Dr. Feather: \u200eMessages and calls are end-to-end encrypted. No one outside of this chat, not even WhatsApp, can read or listen to them.\n[8/15/23, 9:12:43 AM] Dr. Feather: I spotted a rare Hyacinth Macaw yesterday in the Amazon Rainforest. Such a magnificent creature!\n\u200e[8/15/23, 9:12:48 AM] Dr. Feather: \u200eimage omitted\n[8/15/23, 9:13:15 AM] Jungle Jane: That's stunning! Were you able to observe its behavior?\n\u200e[8/15/23, 9:13:23 AM] Dr. Feather: \u200eimage omitted\n[8/15/23, 9:14:02 AM] Dr. Feather: Yes, it seemed quite social with other macaws. They're known for their playful nature.\n[8/15/23, 9:14:15 AM] Jungle Jane: How's the research going on parrot communication?\n\u200e[8/15/23, 9:14:30 AM] Dr. Feather: \u200eimage omitted\n[8/15/23, 9:14:50 AM] Dr. Feather: It's progressing well. We're learning so much about how they use sound and color to communicate.\n[8/15/23, 9:15:10 AM] Jungle Jane: That's fascinating! Can't wait to read your paper on it.\n[8/15/23, 9:15:20 AM] Dr. Feather: Thank you! I'll send you a draft soon.\n[8/15/23, 9:25:16 PM] Jungle Jane: Looking forward to it! Keep up the great work.\n") from langchain_community.chat_loaders.whatsapp import WhatsAppChatLoader loader = WhatsAppChatLoader( path="./whatsapp_chat.txt", ) from typing import List from langchain_community.chat_loaders.base import ChatSession from langchain_community.chat_loaders.utils import ( map_ai_messages, merge_chat_runs, ) raw_messages = loader.lazy_load() merged_messages =
merge_chat_runs(raw_messages)
langchain_community.chat_loaders.utils.merge_chat_runs
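A sketch of the usual next step after merging runs: convert one speaker's messages to AI messages so the session can be exported for fine-tuning (the sender name comes from the chat file written earlier in the prompt):
chat_sessions: List[ChatSession] = list(
    map_ai_messages(merged_messages, sender="Dr. Feather")
)
chat_sessions[0]["messages"][:3]  # Dr. Feather's lines are now AIMessages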
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai') get_ipython().run_line_magic('pip', 'install --upgrade --quiet psycopg2-binary') get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken') YBUSER = "[SANDBOX USER]" YBPASSWORD = "[SANDBOX PASSWORD]" YBDATABASE = "[SANDBOX_DATABASE]" YBHOST = "trialsandbox.sandbox.aws.yellowbrickcloud.com" OPENAI_API_KEY = "[OPENAI API KEY]" import os import pathlib import re import sys import urllib.parse as urlparse from getpass import getpass import psycopg2 from IPython.display import Markdown, display from langchain.chains import LLMChain, RetrievalQAWithSourcesChain from langchain.docstore.document import Document from langchain_community.vectorstores import Yellowbrick from langchain_openai import ChatOpenAI, OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter yellowbrick_connection_string = ( f"postgres://{urlparse.quote(YBUSER)}:{YBPASSWORD}@{YBHOST}:5432/{YBDATABASE}" ) YB_DOC_DATABASE = "sample_data" YB_DOC_TABLE = "yellowbrick_documentation" embedding_table = "my_embeddings" os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) system_template = """If you don't know the answer, Make up your best guess.""" messages = [ SystemMessagePromptTemplate.from_template(system_template), HumanMessagePromptTemplate.from_template("{question}"), ] prompt = ChatPromptTemplate.from_messages(messages) chain_type_kwargs = {"prompt": prompt} llm = ChatOpenAI( model_name="gpt-3.5-turbo", # Modify model_name if you have access to GPT-4 temperature=0, max_tokens=256, ) chain = LLMChain( llm=llm, prompt=prompt, verbose=False, ) def print_result_simple(query): result = chain(query) output_text = f"""### Question: {query} {result['text']} """ display(Markdown(output_text)) print_result_simple("How many databases can be in a Yellowbrick Instance?") print_result_simple("What's an easy way to add users in bulk to Yellowbrick?") try: conn = psycopg2.connect(yellowbrick_connection_string) except psycopg2.Error as e: print(f"Error connecting to the database: {e}") exit(1) cursor = conn.cursor() create_table_query = f""" CREATE TABLE if not exists {embedding_table} ( id uuid, embedding_id integer, text character varying(60000), metadata character varying(1024), embedding double precision ) DISTRIBUTE ON (id); truncate table {embedding_table}; """ try: cursor.execute(create_table_query) print(f"Table '{embedding_table}' created successfully!") except psycopg2.Error as e: print(f"Error creating table: {e}") conn.rollback() conn.commit() cursor.close() conn.close() yellowbrick_doc_connection_string = ( f"postgres://{urlparse.quote(YBUSER)}:{YBPASSWORD}@{YBHOST}:5432/{YB_DOC_DATABASE}" ) conn = psycopg2.connect(yellowbrick_doc_connection_string) cursor = conn.cursor() query = f"SELECT path, document FROM {YB_DOC_TABLE}" cursor.execute(query) yellowbrick_documents = cursor.fetchall() print(f"Extracted {len(yellowbrick_documents)} documents successfully!") cursor.close() conn.close() DOCUMENT_BASE_URL = "https://docs.yellowbrick.com/6.7.1/" # Actual URL separator = "\n## " # This separator assumes Markdown docs from the repo uses ### as logical main header most of the time chunk_size_limit = 2000 max_chunk_overlap = 200 documents = [ Document( page_content=document[1], metadata={"source": DOCUMENT_BASE_URL + 
document[0].replace(".md", ".html")}, ) for document in yellowbrick_documents ] text_splitter = RecursiveCharacterTextSplitter( chunk_size=chunk_size_limit, chunk_overlap=max_chunk_overlap, separators=[separator, "\nn", "\n", ",", " ", ""], ) split_docs = text_splitter.split_documents(documents) docs_text = [doc.page_content for doc in split_docs] embeddings = OpenAIEmbeddings() vector_store = Yellowbrick.from_documents( documents=split_docs, embedding=embeddings, connection_string=yellowbrick_connection_string, table=embedding_table, ) print(f"Created vector store with {len(documents)} documents") system_template = """Use the following pieces of context to answer the users question. Take note of the sources and include them in the answer in the format: "SOURCES: source1 source2", use "SOURCES" in capital letters regardless of the number of sources. If you don't know the answer, just say that "I don't know", don't try to make up an answer. ---------------- {summaries}""" messages = [ SystemMessagePromptTemplate.from_template(system_template), HumanMessagePromptTemplate.from_template("{question}"), ] prompt =
ChatPromptTemplate.from_messages(messages)
langchain.prompts.chat.ChatPromptTemplate.from_messages
from langchain.chains import HypotheticalDocumentEmbedder, LLMChain from langchain.prompts import PromptTemplate from langchain_openai import OpenAI, OpenAIEmbeddings base_embeddings = OpenAIEmbeddings() llm = OpenAI() embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search") result = embeddings.embed_query("Where is the Taj Mahal?") multi_llm = OpenAI(n=4, best_of=4) embeddings = HypotheticalDocumentEmbedder.from_llm( multi_llm, base_embeddings, "web_search" ) result = embeddings.embed_query("Where is the Taj Mahal?") prompt_template = """Please answer the user's question about the most recent state of the union address Question: {question} Answer:""" prompt = PromptTemplate(input_variables=["question"], template=prompt_template) llm_chain = LLMChain(llm=llm, prompt=prompt) embeddings = HypotheticalDocumentEmbedder( llm_chain=llm_chain, base_embeddings=base_embeddings ) result = embeddings.embed_query( "What did the president say about Ketanji Brown Jackson" ) from langchain_community.vectorstores import Chroma from langchain_text_splitters import CharacterTextSplitter with open("../../state_of_the_union.txt") as f: state_of_the_union = f.read() text_splitter =
CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
langchain_text_splitters.CharacterTextSplitter
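A sketch of where this HyDE prompt is likely headed: index the split text with the hypothetical-document embedder standing in for plain embeddings, then search (the query mirrors the one embedded earlier in the prompt):
texts = text_splitter.split_text(state_of_the_union)
docsearch = Chroma.from_texts(texts, embeddings)  # embeddings here is the HyDE embedder
docs = docsearch.similarity_search(
    "What did the president say about Ketanji Brown Jackson"
)
print(docs[0].page_content)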
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python') get_ipython().system('CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python') get_ipython().system('CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install --upgrade --force-reinstall llama-cpp-python --no-cache-dir') get_ipython().system('CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install llama-cpp-python') get_ipython().system('CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install --upgrade --force-reinstall llama-cpp-python --no-cache-dir') get_ipython().system('python -m pip install -e . --force-reinstall --no-cache-dir') from langchain.callbacks.manager import CallbackManager from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.llms import LlamaCpp template = """Question: {question} Answer: Let's work this out in a step by step way to be sure we have the right answer.""" prompt = PromptTemplate.from_template(template) callback_manager = CallbackManager([
StreamingStdOutCallbackHandler()
langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler
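The prompt stops while building the callback manager; a sketch of the `LlamaCpp` instantiation it typically feeds into. The model path is hypothetical and the sampling values are illustrative:
llm = LlamaCpp(
    model_path="/path/to/model.gguf",  # hypothetical local weights file
    temperature=0.75,
    max_tokens=2000,
    n_ctx=2048,
    callback_manager=callback_manager,
    verbose=True,  # needed so the streaming callback receives tokens
)
llm.invoke("Question: A rap battle between Stephen Colbert and John Oliver")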
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_openai.chat_models import ChatOpenAI model = ChatOpenAI() prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're an assistant who's good at {ability}. Respond in 20 words or fewer", ), MessagesPlaceholder(variable_name="history"), ("human", "{input}"), ] ) runnable = prompt | model from langchain_community.chat_message_histories import ChatMessageHistory from langchain_core.chat_history import BaseChatMessageHistory from langchain_core.runnables.history import RunnableWithMessageHistory store = {} def get_session_history(session_id: str) -> BaseChatMessageHistory: if session_id not in store: store[session_id] = ChatMessageHistory() return store[session_id] with_message_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history", ) with_message_history.invoke( {"ability": "math", "input": "What does cosine mean?"}, config={"configurable": {"session_id": "abc123"}}, ) with_message_history.invoke( {"ability": "math", "input": "What?"}, config={"configurable": {"session_id": "abc123"}}, ) with_message_history.invoke( {"ability": "math", "input": "What?"}, config={"configurable": {"session_id": "def234"}}, ) from langchain_core.runnables import ConfigurableFieldSpec store = {} def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory: if (user_id, conversation_id) not in store: store[(user_id, conversation_id)] = ChatMessageHistory() return store[(user_id, conversation_id)] with_message_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history", history_factory_config=[ ConfigurableFieldSpec( id="user_id", annotation=str, name="User ID", description="Unique identifier for the user.", default="", is_shared=True, ), ConfigurableFieldSpec( id="conversation_id", annotation=str, name="Conversation ID", description="Unique identifier for the conversation.", default="", is_shared=True, ), ], ) with_message_history.invoke( {"ability": "math", "input": "Hello"}, config={"configurable": {"user_id": "123", "conversation_id": "1"}}, ) from langchain_core.messages import HumanMessage from langchain_core.runnables import RunnableParallel chain = RunnableParallel({"output_message": ChatOpenAI()}) def get_session_history(session_id: str) -> BaseChatMessageHistory: if session_id not in store: store[session_id] = ChatMessageHistory() return store[session_id] with_message_history = RunnableWithMessageHistory( chain, get_session_history, output_messages_key="output_message", ) with_message_history.invoke( [HumanMessage(content="What did Simone de Beauvoir believe about free will")], config={"configurable": {"session_id": "baz"}}, ) with_message_history.invoke( [HumanMessage(content="How did this compare to Sartre")], config={"configurable": {"session_id": "baz"}}, ) RunnableWithMessageHistory( ChatOpenAI(), get_session_history, ) from operator import itemgetter RunnableWithMessageHistory( itemgetter("input_messages") |
ChatOpenAI()
langchain_openai.chat_models.ChatOpenAI
from langchain_community.llms import Ollama llm =
Ollama(model="llama2")
langchain_community.llms.Ollama
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-robocorp') from langchain.agents import AgentExecutor, OpenAIFunctionsAgent from langchain_core.messages import SystemMessage from langchain_openai import ChatOpenAI from langchain_robocorp import ActionServerToolkit llm = ChatOpenAI(model="gpt-4", temperature=0) toolkit = ActionServerToolkit(url="http://localhost:8080", report_trace=True) tools = toolkit.get_tools() system_message = SystemMessage(content="You are a helpful assistant") prompt = OpenAIFunctionsAgent.create_prompt(system_message) agent = OpenAIFunctionsAgent(llm=llm, prompt=prompt, tools=tools) executor = AgentExecutor(agent=agent, tools=tools, verbose=True) executor.invoke("What is the current weather today in San Francisco in fahrenheit?") toolkit =
ActionServerToolkit(url="http://localhost:8080")
langchain_robocorp.ActionServerToolkit
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') from langchain_community.chat_models import ChatAnthropic from langchain_openai import ChatOpenAI from unittest.mock import patch import httpx from openai import RateLimitError request = httpx.Request("GET", "/") response = httpx.Response(200, request=request) error = RateLimitError("rate limit", response=response, body="") openai_llm = ChatOpenAI(max_retries=0) anthropic_llm = ChatAnthropic() llm = openai_llm.with_fallbacks([anthropic_llm]) with patch("openai.resources.chat.completions.Completions.create", side_effect=error): try: print(openai_llm.invoke("Why did the chicken cross the road?")) except RateLimitError: print("Hit error") with patch("openai.resources.chat.completions.Completions.create", side_effect=error): try: print(llm.invoke("Why did the chicken cross the road?")) except RateLimitError: print("Hit error") from langchain_core.prompts import ChatPromptTemplate prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're a nice assistant who always includes a compliment in your response", ), ("human", "Why did the {animal} cross the road"), ] ) chain = prompt | llm with patch("openai.resources.chat.completions.Completions.create", side_effect=error): try: print(chain.invoke({"animal": "kangaroo"})) except RateLimitError: print("Hit error") from langchain_core.output_parsers import StrOutputParser chat_prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're a nice assistant who always includes a compliment in your response", ), ("human", "Why did the {animal} cross the road"), ] ) chat_model = ChatOpenAI(model_name="gpt-fake") bad_chain = chat_prompt | chat_model | StrOutputParser() from langchain.prompts import PromptTemplate from langchain_openai import OpenAI prompt_template = """Instructions: You should always include a compliment in your response. Question: Why did the {animal} cross the road?""" prompt = PromptTemplate.from_template(prompt_template) llm = OpenAI() good_chain = prompt | llm chain = bad_chain.with_fallbacks([good_chain]) chain.invoke({"animal": "turtle"}) short_llm = ChatOpenAI() long_llm = ChatOpenAI(model="gpt-3.5-turbo-16k") llm = short_llm.with_fallbacks([long_llm]) inputs = "What is the next number: " + ", ".join(["one", "two"] * 3000) try: print(short_llm.invoke(inputs)) except Exception as e: print(e) try: print(llm.invoke(inputs)) except Exception as e: print(e) from langchain.output_parsers import DatetimeOutputParser prompt = ChatPromptTemplate.from_template( "what time was {event} (in %Y-%m-%dT%H:%M:%S.%fZ format - only return this value)" ) openai_35 = ChatOpenAI() | DatetimeOutputParser() openai_4 = ChatOpenAI(model="gpt-4") |
DatetimeOutputParser()
langchain.output_parsers.DatetimeOutputParser
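A sketch of how the two datetime chains are usually compared: run the weaker chain with the stronger one as a fallback, so parse failures on gpt-3.5 output fall through to gpt-4 (the event string is illustrative):
only_35 = prompt | openai_35
fallback_4 = prompt | openai_35.with_fallbacks([openai_4])
try:
    print(only_35.invoke({"event": "the superbowl in 1994"}))
except Exception as e:
    print(f"Error: {e}")  # gpt-3.5 may add prose the DatetimeOutputParser rejects
print(fallback_4.invoke({"event": "the superbowl in 1994"}))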
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-spanner') from google.colab import auth auth.authenticate_user() PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') get_ipython().system('gcloud services enable spanner.googleapis.com') INSTANCE = "my-instance" # @param {type: "string"} DATABASE = "my-database" # @param {type: "string"} TABLE_NAME = "vectors_search_data" # @param {type: "string"} from langchain_google_spanner import SecondaryIndex, SpannerVectorStore, TableColumn SpannerVectorStore.init_vector_store_table( instance_id=INSTANCE, database_id=DATABASE, table_name=TABLE_NAME, id_column="row_id", metadata_columns=[
TableColumn(name="metadata", type="JSON", is_null=True)
langchain_google_spanner.TableColumn
import os embaas_api_key = "YOUR_API_KEY" os.environ["EMBAAS_API_KEY"] = "YOUR_API_KEY" from langchain_community.embeddings import EmbaasEmbeddings embeddings =
EmbaasEmbeddings()
langchain_community.embeddings.EmbaasEmbeddings
from langchain.chains import LLMChain from langchain.memory import ConversationBufferMemory from langchain.prompts import PromptTemplate from langchain_openai import OpenAI template = """You are a chatbot having a conversation with a human. {chat_history} Human: {human_input} Chatbot:""" prompt = PromptTemplate( input_variables=["chat_history", "human_input"], template=template ) memory =
ConversationBufferMemory(memory_key="chat_history")
langchain.memory.ConversationBufferMemory
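A sketch of wiring the memory into the chain this prompt is building; because memory_key="chat_history", past turns are injected into the {chat_history} slot of the template:
llm_chain = LLMChain(llm=OpenAI(), prompt=prompt, verbose=True, memory=memory)
llm_chain.predict(human_input="Hi there my friend")
llm_chain.predict(human_input="Not too bad - how are you?")  # first turn is recalled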
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-robocorp') from langchain.agents import AgentExecutor, OpenAIFunctionsAgent from langchain_core.messages import SystemMessage from langchain_openai import ChatOpenAI from langchain_robocorp import ActionServerToolkit llm = ChatOpenAI(model="gpt-4", temperature=0) toolkit = ActionServerToolkit(url="http://localhost:8080", report_trace=True) tools = toolkit.get_tools() system_message = SystemMessage(content="You are a helpful assistant") prompt = OpenAIFunctionsAgent.create_prompt(system_message) agent = OpenAIFunctionsAgent(llm=llm, prompt=prompt, tools=tools) executor =
AgentExecutor(agent=agent, tools=tools, verbose=True)
langchain.agents.AgentExecutor
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnablePassthrough from langchain_openai import ChatOpenAI template = """Answer the user's question based only on the following context: <context> {context} </context> Question: {question} """ prompt = ChatPromptTemplate.from_template(template) model = ChatOpenAI(temperature=0) search = DuckDuckGoSearchAPIWrapper() def retriever(query): return search.run(query) chain = ( {"context": retriever, "question": RunnablePassthrough()} | prompt | model | StrOutputParser() ) simple_query = "what is langchain?" chain.invoke(simple_query) distracted_query = "man that sam bankman fried trial was crazy! what is langchain?" chain.invoke(distracted_query) retriever(distracted_query) template = """Provide a better search query for a \ web search engine to answer the given question, end \ the queries with ’**’. Question: \ {x} Answer:""" rewrite_prompt = ChatPromptTemplate.from_template(template) from langchain import hub rewrite_prompt = hub.pull("langchain-ai/rewrite") print(rewrite_prompt.template) def _parse(text): return text.strip("**") rewriter = rewrite_prompt | ChatOpenAI(temperature=0) |
StrOutputParser()
langchain_core.output_parsers.StrOutputParser
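A sketch of the rewrite-retrieve-read assembly this prompt builds toward: the rewriter (with the `_parse` helper appended to strip the ** markers) cleans up the query before retrieval, while the original question still reaches the answer prompt:
rewriter = rewrite_prompt | ChatOpenAI(temperature=0) | StrOutputParser() | _parse
rewrite_retrieve_read_chain = (
    {
        "context": {"x": RunnablePassthrough()} | rewriter | retriever,
        "question": RunnablePassthrough(),
    }
    | prompt
    | model
    | StrOutputParser()
)
rewrite_retrieve_read_chain.invoke(distracted_query)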
import os os.environ["SEARCHAPI_API_KEY"] = "" from langchain_community.utilities import SearchApiAPIWrapper search =
SearchApiAPIWrapper()
langchain_community.utilities.SearchApiAPIWrapper
get_ipython().system(' pip install langchain langchain-experimental openai elasticsearch') from elasticsearch import Elasticsearch from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain from langchain_openai import ChatOpenAI ELASTIC_SEARCH_SERVER = "https://elastic:pass@localhost:9200" db = Elasticsearch(ELASTIC_SEARCH_SERVER) llm = ChatOpenAI(model_name="gpt-4", temperature=0) chain = ElasticsearchDatabaseChain.from_llm(llm=llm, database=db, verbose=True) question = "What are the first names of all the customers?" chain.run(question) from langchain.prompts.prompt import PromptTemplate PROMPT_TEMPLATE = """Given an input question, create a syntactically correct Elasticsearch query to run. Unless the user specifies in their question a specific number of examples they wish to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database. Unless told to, do not query for all the columns from a specific index; only ask for the few columns relevant to the question. Pay attention to use only the column names that you can see in the mapping description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which index. Return the query as valid json. Use the following format: Question: Question here ESQuery: Elasticsearch Query formatted as json """ PROMPT = PromptTemplate.from_template( PROMPT_TEMPLATE, ) chain =
ElasticsearchDatabaseChain.from_llm(llm=llm, database=db, query_prompt=PROMPT)
langchain.chains.elasticsearch_database.ElasticsearchDatabaseChain.from_llm
get_ipython().run_line_magic('pip', 'install --upgrade --quiet text-generation transformers google-search-results numexpr langchainhub sentencepiece jinja2') import os from langchain_community.llms import HuggingFaceTextGenInference ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>" HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN") llm = HuggingFaceTextGenInference( inference_server_url=ENDPOINT_URL, max_new_tokens=512, top_k=50, temperature=0.1, repetition_penalty=1.03, server_kwargs={ "headers": { "Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json", } }, ) from langchain_community.llms import HuggingFaceEndpoint ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>" llm = HuggingFaceEndpoint( endpoint_url=ENDPOINT_URL, task="text-generation", model_kwargs={ "max_new_tokens": 512, "top_k": 50, "temperature": 0.1, "repetition_penalty": 1.03, }, ) from langchain_community.llms import HuggingFaceHub llm = HuggingFaceHub( repo_id="HuggingFaceH4/zephyr-7b-beta", task="text-generation", model_kwargs={ "max_new_tokens": 512, "top_k": 30, "temperature": 0.1, "repetition_penalty": 1.03, }, ) from langchain.schema import ( HumanMessage, SystemMessage, ) from langchain_community.chat_models.huggingface import ChatHuggingFace messages = [ SystemMessage(content="You're a helpful assistant"), HumanMessage( content="What happens when an unstoppable force meets an immovable object?" ), ] chat_model = ChatHuggingFace(llm=llm) chat_model.model_id chat_model._to_chat_prompt(messages) res = chat_model.invoke(messages) print(res.content) from langchain import hub from langchain.agents import AgentExecutor, load_tools from langchain.agents.format_scratchpad import format_log_to_str from langchain.agents.output_parsers import ( ReActJsonSingleInputOutputParser, ) from langchain.tools.render import render_text_description from langchain_community.utilities import SerpAPIWrapper tools =
load_tools(["serpapi", "llm-math"], llm=llm)
langchain.agents.load_tools
from langchain import hub from langchain.agents import AgentExecutor, tool from langchain.agents.output_parsers import XMLAgentOutputParser from langchain_community.chat_models import ChatAnthropic model =
ChatAnthropic(model="claude-2")
langchain_community.chat_models.ChatAnthropic
get_ipython().run_line_magic('pip', 'install -qU esprima tree_sitter tree_sitter_languages') import warnings warnings.filterwarnings("ignore") from pprint import pprint from langchain_community.document_loaders.generic import GenericLoader from langchain_community.document_loaders.parsers import LanguageParser from langchain_text_splitters import Language loader = GenericLoader.from_filesystem( "./example_data/source_code", glob="*", suffixes=[".py", ".js"], parser=
LanguageParser()
langchain_community.document_loaders.parsers.LanguageParser
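A sketch of the language-pinned variant the same loader supports: restrict to Python files and set a parser threshold (a minimum line count below which files are not segmented), then inspect the split metadata:
loader = GenericLoader.from_filesystem(
    "./example_data/source_code",
    glob="*",
    suffixes=[".py"],
    parser=LanguageParser(language=Language.PYTHON, parser_threshold=1000),
)
docs = loader.load()
for doc in docs:
    pprint(doc.metadata)  # includes language and content_type for each split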
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llmlingua accelerate') def pretty_print_docs(docs): print( f"\n{'-' * 100}\n".join( [f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)] ) ) from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import FAISS from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter documents = TextLoader( "../../modules/state_of_the_union.txt", ).load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100) texts = text_splitter.split_documents(documents) embedding = OpenAIEmbeddings(model="text-embedding-ada-002") retriever = FAISS.from_documents(texts, embedding).as_retriever(search_kwargs={"k": 20}) query = "What did the president say about Ketanji Brown Jackson" docs = retriever.get_relevant_documents(query) pretty_print_docs(docs) from langchain.retrievers import ContextualCompressionRetriever from langchain_community.retrievers.document_compressors import LLMLinguaCompressor from langchain_openai import ChatOpenAI llm = ChatOpenAI(temperature=0) compressor =
LLMLinguaCompressor(model_name="openai-community/gpt2", device_map="cpu")
langchain_community.retrievers.document_compressors.LLMLinguaCompressor
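Mirroring the Cohere rerank block earlier in this section, a sketch of wiring the LLMLingua compressor into a compression retriever (the query repeats the one used above):
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
    "What did the president say about Ketanji Brown Jackson"
)
pretty_print_docs(compressed_docs)  # retrieved documents are compressed by LLMLingua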
import re from IPython.display import Image, display from steamship import Block, Steamship from langchain.agents import AgentType, initialize_agent from langchain.tools import SteamshipImageGenerationTool from langchain_openai import OpenAI llm = OpenAI(temperature=0) tools = [SteamshipImageGenerationTool(model_name="dall-e")] mrkl = initialize_agent( tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True ) output = mrkl.run("How would you visualize a parrot playing soccer?") def show_output(output): """Display the multi-modal output from the agent.""" UUID_PATTERN = re.compile( r"([0-9A-Za-z]{8}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{12})" ) outputs = UUID_PATTERN.split(output) outputs = [ re.sub(r"^\W+", "", el) for el in outputs ] # Clean leading non-word characters for output in outputs: maybe_block_id = UUID_PATTERN.search(output) if maybe_block_id: display(Image(Block.get(Steamship(), _id=maybe_block_id.group()).raw())) else: print(output, end="\n\n") tools = [
SteamshipImageGenerationTool(model_name="stable-diffusion")
langchain.tools.SteamshipImageGenerationTool
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints') import getpass import os if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): nvapi_key = getpass.getpass("Enter your NVIDIA API key: ") assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key" os.environ["NVIDIA_API_KEY"] = nvapi_key from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="mixtral_8x7b") result = llm.invoke("Write a ballad about LangChain.") print(result.content) print(llm.batch(["What's 2*3?", "What's 2*6?"])) for chunk in llm.stream("How far can a seagull fly in one day?"): print(chunk.content, end="|") async for chunk in llm.astream( "How long does it take for monarch butterflies to migrate?" ): print(chunk.content, end="|") ChatNVIDIA.get_available_models() from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt =
ChatPromptTemplate.from_messages( [("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")
langchain_core.prompts.ChatPromptTemplate.from_messages
meals = [ "Beef Enchiladas with Feta cheese. Mexican-Greek fusion", "Chicken Flatbreads with red sauce. Italian-Mexican fusion", "Veggie sweet potato quesadillas with vegan cheese", "One-Pan Tortelonni bake with peppers and onions", ] from langchain_openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo-instruct") from langchain.prompts import PromptTemplate PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}". Embed the meal into the given text: "{text_to_personalize}". Prepend a personalized message including the user's name "{user}" and their preference "{preference}". Make it sound good. """ PROMPT = PromptTemplate( input_variables=["meal", "text_to_personalize", "user", "preference"], template=PROMPT_TEMPLATE, ) import langchain_experimental.rl_chain as rl_chain chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the week's specialty dish, our master chefs \ believe you will love it!", ) print(response["response"]) for _ in range(5): try: response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the week's specialty dish, our master chefs believe you will love it!", ) except Exception as e: print(e) print(response["response"]) print() scoring_criteria_template = ( "Given {preference} rank how good or bad this selection is {meal}" ) chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer( llm=llm, scoring_criteria_template_str=scoring_criteria_template ), ) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=
rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"])
langchain_experimental.rl_chain.BasedOn
from langchain.indexes import SQLRecordManager, index from langchain_core.documents import Document from langchain_elasticsearch import ElasticsearchStore from langchain_openai import OpenAIEmbeddings collection_name = "test_index" embedding = OpenAIEmbeddings() vectorstore = ElasticsearchStore( es_url="http://localhost:9200", index_name="test_index", embedding=embedding ) namespace = f"elasticsearch/{collection_name}" record_manager = SQLRecordManager( namespace, db_url="sqlite:///record_manager_cache.sql" ) record_manager.create_schema() doc1 = Document(page_content="kitty", metadata={"source": "kitty.txt"}) doc2 = Document(page_content="doggy", metadata={"source": "doggy.txt"}) def _clear(): """Hacky helper method to clear content. See the `full` mode section to understand why it works.""" index([], record_manager, vectorstore, cleanup="full", source_id_key="source") _clear() index( [doc1, doc1, doc1, doc1, doc1], record_manager, vectorstore, cleanup=None, source_id_key="source", ) _clear() index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source") index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source") _clear() index( [doc1, doc2], record_manager, vectorstore, cleanup="incremental", source_id_key="source", ) index( [doc1, doc2], record_manager, vectorstore, cleanup="incremental", source_id_key="source", ) index([], record_manager, vectorstore, cleanup="incremental", source_id_key="source") changed_doc_2 = Document(page_content="puppy", metadata={"source": "doggy.txt"}) index( [changed_doc_2], record_manager, vectorstore, cleanup="incremental", source_id_key="source", ) _clear() all_docs = [doc1, doc2]
index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source")
langchain.indexes.index
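To illustrate what cleanup="full" adds over "incremental": any document missing from the indexed batch is also purged from the vector store. A short sketch continuing from `all_docs`:
del all_docs[0]  # pretend doc1 was deleted at the source
index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source")
# the vector store copy of doc1 is now removed as well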
from langchain_community.utils.openai_functions import ( convert_pydantic_to_openai_function, ) from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field, validator from langchain_openai import ChatOpenAI class Joke(BaseModel): """Joke to tell user.""" setup: str = Field(description="question to set up a joke") punchline: str = Field(description="answer to resolve the joke") openai_functions = [convert_pydantic_to_openai_function(Joke)] model = ChatOpenAI(temperature=0) prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful assistant"), ("user", "{input}")] ) from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser parser = JsonOutputFunctionsParser() chain = prompt | model.bind(functions=openai_functions) | parser chain.invoke({"input": "tell me a joke"}) for s in chain.stream({"input": "tell me a joke"}): print(s) from typing import List from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser class Jokes(BaseModel): """Jokes to tell user.""" joke: List[Joke] funniness_level: int parser = JsonKeyOutputFunctionsParser(key_name="joke") openai_functions = [convert_pydantic_to_openai_function(Jokes)] chain = prompt | model.bind(functions=openai_functions) | parser chain.invoke({"input": "tell me two jokes"}) for s in chain.stream({"input": "tell me two jokes"}): print(s) from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser class Joke(BaseModel): """Joke to tell user.""" setup: str = Field(description="question to set up a joke") punchline: str = Field(description="answer to resolve the joke") @validator("setup") def question_ends_with_question_mark(cls, field): if field[-1] != "?": raise ValueError("Badly formed question!") return field parser =
PydanticOutputFunctionsParser(pydantic_schema=Joke)
langchain.output_parsers.openai_functions.PydanticOutputFunctionsParser
from langchain.memory import ConversationTokenBufferMemory from langchain_openai import OpenAI llm = OpenAI() memory =
ConversationTokenBufferMemory(llm=llm, max_token_limit=10)
langchain.memory.ConversationTokenBufferMemory
from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import InMemoryByteStore from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import Chroma from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter loaders = [ TextLoader("../../paul_graham_essay.txt"), TextLoader("../../state_of_the_union.txt"), ] docs = [] for loader in loaders: docs.extend(loader.load()) text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000) docs = text_splitter.split_documents(docs) vectorstore = Chroma( collection_name="full_documents", embedding_function=OpenAIEmbeddings() ) store = InMemoryByteStore() id_key = "doc_id" retriever = MultiVectorRetriever( vectorstore=vectorstore, byte_store=store, id_key=id_key, ) import uuid doc_ids = [str(uuid.uuid4()) for _ in docs] child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400) sub_docs = [] for i, doc in enumerate(docs): _id = doc_ids[i] _sub_docs = child_text_splitter.split_documents([doc]) for _doc in _sub_docs: _doc.metadata[id_key] = _id sub_docs.extend(_sub_docs) retriever.vectorstore.add_documents(sub_docs) retriever.docstore.mset(list(zip(doc_ids, docs))) retriever.vectorstore.similarity_search("justice breyer")[0] len(retriever.get_relevant_documents("justice breyer")[0].page_content) from langchain.retrievers.multi_vector import SearchType retriever.search_type = SearchType.mmr len(retriever.get_relevant_documents("justice breyer")[0].page_content) import uuid from langchain_core.documents import Document from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI chain = ( {"doc": lambda x: x.page_content} |
ChatPromptTemplate.from_template("Summarize the following document:\n\n{doc}")
langchain_core.prompts.ChatPromptTemplate.from_template
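The prompt breaks off mid-chain; a sketch of the summary-based MultiVector pattern it is building: summarize each parent document, index the summaries, and keep the full documents in the docstore under the same ids (all names come from earlier in the same prompt):
chain = (
    {"doc": lambda x: x.page_content}
    | ChatPromptTemplate.from_template("Summarize the following document:\n\n{doc}")
    | ChatOpenAI()
    | StrOutputParser()
)
summaries = chain.batch(docs, {"max_concurrency": 5})
summary_docs = [
    Document(page_content=s, metadata={id_key: doc_ids[i]})
    for i, s in enumerate(summaries)
]
retriever.vectorstore.add_documents(summary_docs)  # search over summaries
retriever.docstore.mset(list(zip(doc_ids, docs)))  # but return full documents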
REGION = "us-central1" # @param {type:"string"} INSTANCE = "test-instance" # @param {type:"string"} DATABASE = "test" # @param {type:"string"} TABLE_NAME = "test-default" # @param {type:"string"} get_ipython().run_line_magic('pip', 'install -upgrade --quiet langchain-google-cloud-sql-mysql') PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') from google.colab import auth auth.authenticate_user() get_ipython().system('gcloud services enable sqladmin.googleapis.com') from langchain_google_cloud_sql_mysql import MySQLEngine engine = MySQLEngine.from_instance( project_id=PROJECT_ID, region=REGION, instance=INSTANCE, database=DATABASE ) engine.init_document_table(TABLE_NAME, overwrite_existing=True) from langchain_core.documents import Document from langchain_google_cloud_sql_mysql import MySQLDocumentSaver test_docs = [ Document( page_content="Apple Granny Smith 150 0.99 1", metadata={"fruit_id": 1}, ), Document( page_content="Banana Cavendish 200 0.59 0", metadata={"fruit_id": 2}, ), Document( page_content="Orange Navel 80 1.29 1", metadata={"fruit_id": 3}, ), ] saver = MySQLDocumentSaver(engine=engine, table_name=TABLE_NAME) saver.add_documents(test_docs) from langchain_google_cloud_sql_mysql import MySQLLoader loader = MySQLLoader(engine=engine, table_name=TABLE_NAME) docs = loader.lazy_load() for doc in docs: print("Loaded documents:", doc) from langchain_google_cloud_sql_mysql import MySQLLoader loader = MySQLLoader( engine=engine, query=f"select * from `{TABLE_NAME}` where JSON_EXTRACT(langchain_metadata, '$.fruit_id') = 1;", ) onedoc = loader.load() onedoc from langchain_google_cloud_sql_mysql import MySQLLoader loader =
MySQLLoader(engine=engine, table_name=TABLE_NAME)
langchain_google_cloud_sql_mysql.MySQLLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai context-python')
import os

from langchain.callbacks import ContextCallbackHandler

token = os.environ["CONTEXT_API_TOKEN"]
context_callback = ContextCallbackHandler(token)
import os

from langchain.callbacks import ContextCallbackHandler
from langchain.schema import (
    HumanMessage,
    SystemMessage,
)
from langchain_openai import ChatOpenAI

token = os.environ["CONTEXT_API_TOKEN"]
chat = ChatOpenAI(
    headers={"user_id": "123"}, temperature=0, callbacks=[ContextCallbackHandler(token)]
)
messages = [
    SystemMessage(
        content="You are a helpful assistant that translates English to French."
    ),
    HumanMessage(content="I love programming."),
]
print(chat(messages))
import os

from langchain.callbacks import ContextCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain_openai import ChatOpenAI

token = os.environ["CONTEXT_API_TOKEN"]
human_message_prompt = HumanMessagePromptTemplate(
    prompt=PromptTemplate(
        template="What is a good name for a company that makes {product}?",
        input_variables=["product"],
    )
)
chat_prompt_template =
ChatPromptTemplate.from_messages([human_message_prompt])
langchain.prompts.chat.ChatPromptTemplate.from_messages
from getpass import getpass

from langchain_community.document_loaders.larksuite import LarkSuiteDocLoader

DOMAIN = input("larksuite domain")
ACCESS_TOKEN = getpass("larksuite tenant_access_token or user_access_token")
DOCUMENT_ID = input("larksuite document id")
from pprint import pprint

larksuite_loader =
LarkSuiteDocLoader(DOMAIN, ACCESS_TOKEN, DOCUMENT_ID)
langchain_community.document_loaders.larksuite.LarkSuiteDocLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "cassio>=0.1.4"')
import os
from getpass import getpass

from datasets import (
    load_dataset,
)
from langchain_community.document_loaders import PyPDFLoader
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

os.environ["OPENAI_API_KEY"] = getpass("OPENAI_API_KEY = ")
embe = OpenAIEmbeddings()
from langchain_community.vectorstores import Cassandra

from cassandra.cluster import Cluster

cluster = Cluster(["127.0.0.1"])
session = cluster.connect()
import cassio

CASSANDRA_KEYSPACE = input("CASSANDRA_KEYSPACE = ")
cassio.init(session=session, keyspace=CASSANDRA_KEYSPACE)
vstore = Cassandra(
    embedding=embe,
    table_name="cassandra_vector_demo",
)
ASTRA_DB_ID = input("ASTRA_DB_ID = ")
ASTRA_DB_APPLICATION_TOKEN = getpass("ASTRA_DB_APPLICATION_TOKEN = ")
desired_keyspace = input("ASTRA_DB_KEYSPACE (optional, can be left empty) = ")
if desired_keyspace:
    ASTRA_DB_KEYSPACE = desired_keyspace
else:
    ASTRA_DB_KEYSPACE = None
import cassio

cassio.init(
    database_id=ASTRA_DB_ID,
    token=ASTRA_DB_APPLICATION_TOKEN,
    keyspace=ASTRA_DB_KEYSPACE,
)
vstore = Cassandra(
    embedding=embe,
    table_name="cassandra_vector_demo",
)
philo_dataset = load_dataset("datastax/philosopher-quotes")["train"]
docs = []
for entry in philo_dataset:
    metadata = {"author": entry["author"]}
    doc = Document(page_content=entry["quote"], metadata=metadata)
    docs.append(doc)
inserted_ids = vstore.add_documents(docs)
print(f"\nInserted {len(inserted_ids)} documents.")
texts = ["I think, therefore I am.", "To the things themselves!"]
metadatas = [{"author": "descartes"}, {"author": "husserl"}]
ids = ["desc_01", "huss_xy"]
inserted_ids_2 = vstore.add_texts(texts=texts, metadatas=metadatas, ids=ids)
print(f"\nInserted {len(inserted_ids_2)} documents.")
results = vstore.similarity_search("Our life is what we make of it", k=3)
for res in results:
    print(f"* {res.page_content} [{res.metadata}]")
results_filtered = vstore.similarity_search(
    "Our life is what we make of it",
    k=3,
    filter={"author": "plato"},
)
for res in results_filtered:
    print(f"* {res.page_content} [{res.metadata}]")
results = vstore.similarity_search_with_score("Our life is what we make of it", k=3)
for res, score in results:
    print(f"* [SIM={score:3f}] {res.page_content} [{res.metadata}]")
results = vstore.max_marginal_relevance_search(
    "Our life is what we make of it",
    k=3,
    filter={"author": "aristotle"},
)
for res in results:
    print(f"* {res.page_content} [{res.metadata}]")
delete_1 = vstore.delete(inserted_ids[:3])
print(f"all_succeed={delete_1}")  # True, all documents deleted
delete_2 = vstore.delete(inserted_ids[2:5])
print(f"some_succeeds={delete_2}")  # True, though some IDs were gone already
get_ipython().system('curl -L "https://github.com/awesome-astra/datasets/blob/main/demo-resources/what-is-philosophy/what-is-philosophy.pdf?raw=true" -o "what-is-philosophy.pdf"')
pdf_loader = PyPDFLoader("what-is-philosophy.pdf")
splitter =
RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=64)
langchain_text_splitters.RecursiveCharacterTextSplitter
from langchain.agents import AgentType, initialize_agent
from langchain.chains import LLMMathChain
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import Tool
from langchain_openai import ChatOpenAI

get_ipython().run_line_magic('pip', 'install --upgrade --quiet numexpr')
llm = ChatOpenAI(temperature=0, model="gpt-4")
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
primes = {998: 7901, 999: 7907, 1000: 7919}

class CalculatorInput(BaseModel):
    question: str = Field()

class PrimeInput(BaseModel):
    n: int = Field()

def is_prime(n: int) -> bool:
    if n <= 1 or (n % 2 == 0 and n > 2):
        return False
    for i in range(3, int(n**0.5) + 1, 2):
        if n % i == 0:
            return False
    return True

def get_prime(n: int, primes: dict = primes) -> str:
    return str(primes.get(int(n)))

async def aget_prime(n: int, primes: dict = primes) -> str:
    return str(primes.get(int(n)))

tools = [
    Tool(
        name="GetPrime",
        func=get_prime,
        description="A tool that returns the `n`th prime number",
        args_schema=PrimeInput,
        coroutine=aget_prime,
    ),
    Tool.from_function(
        func=llm_math_chain.run,
        name="Calculator",
        description="Useful for when you need to compute mathematical expressions",
        args_schema=CalculatorInput,
        coroutine=llm_math_chain.arun,
    ),
]
from langchain import hub

prompt = hub.pull("hwchase17/openai-functions-agent")
from langchain.agents import create_openai_functions_agent

agent =
create_openai_functions_agent(llm, tools, prompt)
langchain.agents.create_openai_functions_agent
get_ipython().run_line_magic('pip', 'install --upgrade --quiet scann')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import ScaNN
from langchain_text_splitters import CharacterTextSplitter

loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = HuggingFaceEmbeddings()
db =
ScaNN.from_documents(docs, embeddings)
langchain_community.vectorstores.ScaNN.from_documents
from langchain.indexes import SQLRecordManager, index
from langchain_core.documents import Document
from langchain_elasticsearch import ElasticsearchStore
from langchain_openai import OpenAIEmbeddings

collection_name = "test_index"
embedding = OpenAIEmbeddings()
vectorstore = ElasticsearchStore(
    es_url="http://localhost:9200", index_name="test_index", embedding=embedding
)
namespace = f"elasticsearch/{collection_name}"
record_manager = SQLRecordManager(
    namespace, db_url="sqlite:///record_manager_cache.sql"
)
record_manager.create_schema()
doc1 = Document(page_content="kitty", metadata={"source": "kitty.txt"})
doc2 = Document(page_content="doggy", metadata={"source": "doggy.txt"})

def _clear():
    """Hacky helper method to clear content. See the `full` mode section to understand why it works."""
    index([], record_manager, vectorstore, cleanup="full", source_id_key="source")

_clear()
index(
    [doc1, doc1, doc1, doc1, doc1],
    record_manager,
    vectorstore,
    cleanup=None,
    source_id_key="source",
)  # identical content is de-duplicated, so doc1 is only indexed once
_clear()
index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")
index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")
_clear()
index(
    [doc1, doc2],
    record_manager,
    vectorstore,
    cleanup="incremental",
    source_id_key="source",
)
index(
    [doc1, doc2],
    record_manager,
    vectorstore,
    cleanup="incremental",
    source_id_key="source",
)  # re-indexing unchanged documents is skipped by the record manager
index([], record_manager, vectorstore, cleanup="incremental", source_id_key="source")
changed_doc_2 = Document(page_content="puppy", metadata={"source": "doggy.txt"})
index(
    [changed_doc_2],
    record_manager,
    vectorstore,
    cleanup="incremental",
    source_id_key="source",
)  # new version is indexed; the stale "doggy.txt" content is cleaned up
_clear()
all_docs = [doc1, doc2]
index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source")
del all_docs[0]
all_docs
index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source")
langchain.indexes.index
from typing import Optional

from langchain_experimental.autonomous_agents import BabyAGI
from langchain_openai import OpenAI, OpenAIEmbeddings

from langchain.docstore import InMemoryDocstore
from langchain_community.vectorstores import FAISS

embeddings_model = OpenAIEmbeddings()
import faiss

embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index,
InMemoryDocstore({})
langchain.docstore.InMemoryDocstore
get_ipython().run_line_magic('pip', "install --upgrade --quiet langchain-openai 'deeplake[enterprise]' tiktoken")
from langchain_community.vectorstores import DeepLake
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter

import getpass
import os

os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
activeloop_token = getpass.getpass("activeloop token:")
embeddings = OpenAIEmbeddings()
from langchain_community.document_loaders import TextLoader

loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, overwrite=True)
db.add_documents(docs)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
db =
DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, read_only=True)
langchain_community.vectorstores.DeepLake
get_ipython().run_line_magic('pip', 'install --upgrade --quiet marqo')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Marqo
from langchain_text_splitters import CharacterTextSplitter

from langchain_community.document_loaders import TextLoader

loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
import marqo

marqo_url = "http://localhost:8882"  # if using marqo cloud replace with your endpoint (console.marqo.ai)
marqo_api_key = ""  # if using marqo cloud replace with your api key (console.marqo.ai)
client = marqo.Client(url=marqo_url, api_key=marqo_api_key)
index_name = "langchain-demo"
docsearch = Marqo.from_documents(docs, index_name=index_name)
query = "What did the president say about Ketanji Brown Jackson"
result_docs = docsearch.similarity_search(query)
print(result_docs[0].page_content)
result_docs = docsearch.similarity_search_with_score(query)
print(result_docs[0][0].page_content, result_docs[0][1], sep="\n")
index_name = "langchain-multimodal-demo"
try:
    client.delete_index(index_name)
except Exception:
    print(f"Creating {index_name}")
settings = {"treat_urls_and_pointers_as_images": True, "model": "ViT-L/14"}
client.create_index(index_name, **settings)
client.index(index_name).add_documents(
    [
        {
            "caption": "Bus",
            "image": "https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image4.jpg",
        },
        {
            "caption": "Plane",
            "image": "https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image2.jpg",
        },
    ],
)

def get_content(res):
    """Helper to format Marqo's documents into text to be used as page_content"""
    return f"{res['caption']}: {res['image']}"

docsearch =
Marqo(client, index_name, page_content_builder=get_content)
langchain_community.vectorstores.Marqo
get_ipython().run_line_magic('pip', 'install -U --quiet langchain langchain_community openai chromadb langchain-experimental')
get_ipython().run_line_magic('pip', 'install --quiet "unstructured[all-docs]" pypdf pillow pydantic lxml pillow matplotlib chromadb tiktoken')
import logging
import zipfile

import requests

logging.basicConfig(level=logging.INFO)
data_url = "https://storage.googleapis.com/benchmarks-artifacts/langchain-docs-benchmarking/cj.zip"
result = requests.get(data_url)
filename = "cj.zip"
with open(filename, "wb") as file:
    file.write(result.content)
with zipfile.ZipFile(filename, "r") as zip_ref:
    zip_ref.extractall()
from langchain_community.document_loaders import PyPDFLoader

loader = PyPDFLoader("./cj/cj.pdf")
docs = loader.load()
tables = []
texts = [d.page_content for d in docs]
len(texts)
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatVertexAI
from langchain_community.llms import VertexAI
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda

def generate_text_summaries(texts, tables, summarize_texts=False):
    """
    Summarize text elements
    texts: List of str
    tables: List of str
    summarize_texts: Bool to summarize texts
    """
    prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
    These summaries will be embedded and used to retrieve the raw text or table elements. \
    Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
    prompt = PromptTemplate.from_template(prompt_text)
    empty_response = RunnableLambda(
        lambda x: AIMessage(content="Error processing document")
    )
    model = VertexAI(
        temperature=0, model_name="gemini-pro", max_output_tokens=1024
    ).with_fallbacks([empty_response])
    summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
    text_summaries = []
    table_summaries = []
    if texts and summarize_texts:
        text_summaries = summarize_chain.batch(texts, {"max_concurrency": 1})
    elif texts:
        text_summaries = texts
    if tables:
        table_summaries = summarize_chain.batch(tables, {"max_concurrency": 1})
    return text_summaries, table_summaries

text_summaries, table_summaries = generate_text_summaries(
    texts, tables, summarize_texts=True
)
len(text_summaries)
import base64
import os

from langchain_core.messages import HumanMessage

def encode_image(image_path):
    """Getting the base64 string"""
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")

def image_summarize(img_base64, prompt):
    """Make image summary"""
    model = ChatVertexAI(model_name="gemini-pro-vision", max_output_tokens=1024)
    msg = model(
        [
            HumanMessage(
                content=[
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
                    },
                ]
            )
        ]
    )
    return msg.content

def generate_img_summaries(path):
    """
    Generate summaries and base64 encoded strings for images
    path: Path to list of .jpg files extracted by Unstructured
    """
    img_base64_list = []
    image_summaries = []
    prompt = """You are an assistant tasked with summarizing images for retrieval. \
    These summaries will be embedded and used to retrieve the raw image. \
    Give a concise summary of the image that is well optimized for retrieval."""
    for img_file in sorted(os.listdir(path)):
        if img_file.endswith(".jpg"):
            img_path = os.path.join(path, img_file)
            base64_image = encode_image(img_path)
            img_base64_list.append(base64_image)
            image_summaries.append(image_summarize(base64_image, prompt))
    return img_base64_list, image_summaries

img_base64_list, image_summaries = generate_img_summaries("./cj")
len(image_summaries)
import uuid

from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.embeddings import VertexAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document

def create_multi_vector_retriever(
    vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images
):
    """
    Create retriever that indexes summaries, but returns raw images or texts
    """
    store =
InMemoryStore()
langchain.storage.InMemoryStore
from langchain_community.document_loaders import IMSDbLoader

loader =
IMSDbLoader("https://imsdb.com/scripts/BlacKkKlansman.html")
langchain_community.document_loaders.IMSDbLoader
get_ipython().system('pip3 install tcvectordb')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.fake import FakeEmbeddings
from langchain_community.vectorstores import TencentVectorDB
from langchain_community.vectorstores.tencentvectordb import ConnectionParams
from langchain_text_splitters import CharacterTextSplitter

loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = FakeEmbeddings(size=128)
conn_params = ConnectionParams(
    url="http://10.0.X.X",
    key="eC4bLRy2va******************************",
    username="root",
    timeout=20,
)
vector_db = TencentVectorDB.from_documents(
    docs,
    embeddings,
    connection_params=conn_params,
)
query = "What did the president say about Ketanji Brown Jackson"
docs = vector_db.similarity_search(query)
docs[0].page_content
vector_db =
TencentVectorDB(embeddings, conn_params)
langchain_community.vectorstores.TencentVectorDB
from langchain_community.document_loaders import NotionDirectoryLoader

loader =
NotionDirectoryLoader("Notion_DB")
langchain_community.document_loaders.NotionDirectoryLoader
from langchain_community.document_loaders import S3FileLoader

get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3')
loader =
S3FileLoader("testing-hwc", "fake.docx")
langchain_community.document_loaders.S3FileLoader
get_ipython().system('pip install boto3')
from langchain_experimental.recommenders import AmazonPersonalize

recommender_arn = "<insert_arn>"
client = AmazonPersonalize(
    credentials_profile_name="default",
    region_name="us-west-2",
    recommender_arn=recommender_arn,
)
client.get_recommendations(user_id="1")
from langchain.llms.bedrock import Bedrock
from langchain_experimental.recommenders import AmazonPersonalizeChain

bedrock_llm =
Bedrock(model_id="anthropic.claude-v2", region_name="us-west-2")
langchain.llms.bedrock.Bedrock
from langchain.evaluation import load_evaluator

evaluator = load_evaluator("criteria", criteria="conciseness")
from langchain.evaluation import EvaluatorType

evaluator = load_evaluator(EvaluatorType.CRITERIA, criteria="conciseness")
eval_result = evaluator.evaluate_strings(
    prediction="What's 2+2? That's an elementary question. The answer you're looking for is that two and two is four.",
    input="What's 2+2?",
)
print(eval_result)
evaluator = load_evaluator("labeled_criteria", criteria="correctness")
eval_result = evaluator.evaluate_strings(
    input="What is the capital of the US?",
    prediction="Topeka, KS",
    reference="The capital of the US is Topeka, KS, where it permanently moved from Washington D.C. on May 16, 2023",
)
print(f'With ground truth: {eval_result["score"]}')
from langchain.evaluation import Criteria

list(Criteria)
custom_criterion = {
    "numeric": "Does the output contain numeric or mathematical information?"
}
eval_chain = load_evaluator(
    EvaluatorType.CRITERIA,
    criteria=custom_criterion,
)
query = "Tell me a joke"
prediction = "I ate some square pie but I don't know the square of pi."
eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)
print(eval_result)
custom_criteria = {
    "numeric": "Does the output contain numeric information?",
    "mathematical": "Does the output contain mathematical information?",
    "grammatical": "Is the output grammatically correct?",
    "logical": "Is the output logical?",
}
eval_chain = load_evaluator(
    EvaluatorType.CRITERIA,
    criteria=custom_criteria,
)
eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)
print("Multi-criteria evaluation")
print(eval_result)
from langchain.chains.constitutional_ai.principles import PRINCIPLES

print(f"{len(PRINCIPLES)} available principles")
list(PRINCIPLES.items())[:5]
evaluator = load_evaluator(EvaluatorType.CRITERIA, criteria=PRINCIPLES["harmful1"])
eval_result = evaluator.evaluate_strings(
    prediction="I say that man is a lilly-livered nincompoop",
    input="What do you think of Will?",
)
print(eval_result)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet anthropic')
from langchain_community.chat_models import ChatAnthropic

llm = ChatAnthropic(temperature=0)
evaluator =
load_evaluator("criteria", llm=llm, criteria="conciseness")
langchain.evaluation.load_evaluator
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('poetry run pip install replicate')
from getpass import getpass

REPLICATE_API_TOKEN = getpass()
import os

os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Replicate

llm = Replicate(
    model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
    model_kwargs={"temperature": 0.75, "max_length": 500, "top_p": 1},
)
prompt = """
User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?
Assistant:
"""
llm(prompt)
llm = Replicate(
    model="replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5"
)
prompt = """
Answer the following yes/no question by reasoning step by step.
Can a dog drive a car?
"""
llm(prompt)
text2image = Replicate(
    model="stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf",
    model_kwargs={"image_dimensions": "512x512"},
)
image_output = text2image("A cat riding a motorcycle by Picasso")
image_output
get_ipython().system('poetry run pip install Pillow')
from io import BytesIO

import requests
from PIL import Image

response = requests.get(image_output)
img = Image.open(BytesIO(response.content))
img
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

llm = Replicate(
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],
    model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
    model_kwargs={"temperature": 0.75, "max_length": 500, "top_p": 1},
)
prompt = """
User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?
Assistant:
"""
_ = llm(prompt)
import time

llm = Replicate(
    model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
    model_kwargs={"temperature": 0.01, "max_length": 500, "top_p": 1},
)
prompt = """
User: What is the best way to learn python?
Assistant:
"""
start_time = time.perf_counter()
raw_output = llm(prompt)  # raw output, no stop
end_time = time.perf_counter()
print(f"Raw output:\n {raw_output}")
print(f"Raw output runtime: {end_time - start_time} seconds")
start_time = time.perf_counter()
stopped_output = llm(prompt, stop=["\n\n"])  # stop on double newlines
end_time = time.perf_counter()
print(f"Stopped output:\n {stopped_output}")
print(f"Stopped output runtime: {end_time - start_time} seconds")
from langchain.chains import SimpleSequentialChain

dolly_llm =
Replicate( model="replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5" )
langchain_community.llms.Replicate
get_ipython().run_line_magic('pip', 'install --upgrade --quiet atlassian-python-api')
import os

from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit
from langchain_community.utilities.jira import JiraAPIWrapper
from langchain_openai import OpenAI

os.environ["JIRA_API_TOKEN"] = "abc"
os.environ["JIRA_USERNAME"] = "123"
os.environ["JIRA_INSTANCE_URL"] = "https://jira.atlassian.com"
os.environ["OPENAI_API_KEY"] = "xyz"
llm = OpenAI(temperature=0)
jira =
JiraAPIWrapper()
langchain_community.utilities.jira.JiraAPIWrapper
from langchain_community.document_loaders import UnstructuredURLLoader

urls = [
    "https://www.understandingwar.org/backgrounder/russian-offensive-campaign-assessment-february-8-2023",
    "https://www.understandingwar.org/backgrounder/russian-offensive-campaign-assessment-february-9-2023",
]
loader = UnstructuredURLLoader(urls=urls)
data = loader.load()
from langchain_community.document_loaders import SeleniumURLLoader

urls = [
    "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
    "https://goo.gl/maps/NDSHwePEyaHMFGwh8",
]
loader = SeleniumURLLoader(urls=urls)
data = loader.load()
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "playwright"')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "unstructured"')
get_ipython().system('playwright install')
from langchain_community.document_loaders import PlaywrightURLLoader

urls = [
    "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
    "https://goo.gl/maps/NDSHwePEyaHMFGwh8",
]
loader =
PlaywrightURLLoader(urls=urls, remove_selectors=["header", "footer"])
langchain_community.document_loaders.PlaywrightURLLoader
get_ipython().run_line_magic('pip', 'install -qU langchain-text-splitters')
from langchain_text_splitters import (
    Language,
    RecursiveCharacterTextSplitter,
)

[e.value for e in Language]
RecursiveCharacterTextSplitter.get_separators_for_language(Language.PYTHON)
langchain_text_splitters.RecursiveCharacterTextSplitter.get_separators_for_language
get_ipython().run_line_magic('pip', 'install -qU langchain-text-splitters')
from langchain_text_splitters import HTMLHeaderTextSplitter

html_string = """
<!DOCTYPE html>
<html>
<body>
    <div>
        <h1>Foo</h1>
        <p>Some intro text about Foo.</p>
        <div>
            <h2>Bar main section</h2>
            <p>Some intro text about Bar.</p>
            <h3>Bar subsection 1</h3>
            <p>Some text about the first subtopic of Bar.</p>
            <h3>Bar subsection 2</h3>
            <p>Some text about the second subtopic of Bar.</p>
        </div>
        <div>
            <h2>Baz</h2>
            <p>Some text about Baz</p>
        </div>
        <br>
        <p>Some concluding text about Foo</p>
    </div>
</body>
</html>
"""
headers_to_split_on = [
    ("h1", "Header 1"),
    ("h2", "Header 2"),
    ("h3", "Header 3"),
]
html_splitter =
HTMLHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
langchain_text_splitters.HTMLHeaderTextSplitter