Dataset schema: each row has three string columns.
  prompt      (length 51 to 10k)
  completion  (length 8 to 362)
  api         (length 18 to 90)
get_ipython().system('pip install pettingzoo pygame rlcard') import collections import inspect import tenacity from langchain.output_parsers import RegexParser from langchain.schema import ( HumanMessage, SystemMessage, ) from langchain_openai import ChatOpenAI class GymnasiumAgent: @classmethod def get_docs(cls, env): return env.unwrapped.__doc__ def __init__(self, model, env): self.model = model self.env = env self.docs = self.get_docs(env) self.instructions = """ Your goal is to maximize your return, i.e. the sum of the rewards you receive. I will give you an observation, reward, termination flag, truncation flag, and the return so far, formatted as: Observation: <observation> Reward: <reward> Termination: <termination> Truncation: <truncation> Return: <sum_of_rewards> You will respond with an action, formatted as: Action: <action> where you replace <action> with your actual action. Do nothing else but return the action. """ self.action_parser =
RegexParser( regex=r"Action: (.*)
langchain.output_parsers.RegexParser
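The completion above ends mid-call. A minimal sketch of how the parser is plausibly finished, assuming the single capture group maps to one output key named "action" (matching the agent's instruction format):

from langchain.output_parsers import RegexParser

# Parses replies of the form "Action: <action>" into {"action": "<action>"}.
action_parser = RegexParser(
    regex=r"Action: (.*)",
    output_keys=["action"],
    default_output_key="action",
)
print(action_parser.parse("Action: 1"))  # -> {'action': '1'}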
from langchain.chains import LLMMathChain from langchain_openai import OpenAI llm = OpenAI(temperature=0) llm_math =
LLMMathChain.from_llm(llm, verbose=True)
langchain.chains.LLMMathChain.from_llm
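For context, a hedged usage sketch of the chain built in this row; the sample question is illustrative only:

from langchain.chains import LLMMathChain
from langchain_openai import OpenAI

llm = OpenAI(temperature=0)
llm_math = LLMMathChain.from_llm(llm, verbose=True)
# The chain asks the LLM to translate the question into a numexpr
# expression, then evaluates that expression locally.
llm_math.invoke({"question": "What is 13 raised to the .3432 power?"})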
get_ipython().system('pip install --upgrade langchain langchain-google-vertexai') project: str = "PUT_YOUR_PROJECT_ID_HERE" # @param {type:"string"} endpoint_id: str = "PUT_YOUR_ENDPOINT_ID_HERE" # @param {type:"string"} location: str = "PUT_YOUR_ENDPOINT_LOCATION_HERE" # @param {type:"string"} from langchain_google_vertexai import ( GemmaChatVertexAIModelGarden, GemmaVertexAIModelGarden, ) llm = GemmaVertexAIModelGarden( endpoint_id=endpoint_id, project=project, location=location, ) output = llm.invoke("What is the meaning of life?") print(output) from langchain_core.messages import HumanMessage llm = GemmaChatVertexAIModelGarden( endpoint_id=endpoint_id, project=project, location=location, ) message1 = HumanMessage(content="How much is 2+2?") answer1 = llm.invoke([message1]) print(answer1) message2 = HumanMessage(content="How much is 3+3?") answer2 = llm.invoke([message1, answer1, message2]) print(answer2) answer1 = llm.invoke([message1], parse_response=True) print(answer1) answer2 = llm.invoke([message1, answer1, message2], parse_response=True) print(answer2) get_ipython().system('mkdir -p ~/.kaggle && cp kaggle.json ~/.kaggle/kaggle.json') get_ipython().system('pip install keras>=3 keras_nlp') from langchain_google_vertexai import GemmaLocalKaggle keras_backend: str = "jax" # @param {type:"string"} model_name: str = "gemma_2b_en" # @param {type:"string"} llm =
GemmaLocalKaggle(model_name=model_name, keras_backend=keras_backend)
langchain_google_vertexai.GemmaLocalKaggle
get_ipython().run_line_magic('pip', 'install --upgrade --quiet feedparser newspaper3k listparser') from langchain_community.document_loaders import RSSFeedLoader urls = ["https://news.ycombinator.com/rss"] loader =
RSSFeedLoader(urls=urls)
langchain_community.document_loaders.RSSFeedLoader
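A short, hedged continuation showing how the loader is typically consumed (assumes the feed URL is reachable at run time):

from langchain_community.document_loaders import RSSFeedLoader

loader = RSSFeedLoader(urls=["https://news.ycombinator.com/rss"])
docs = loader.load()  # one Document per feed entry; article text fetched via newspaper3k
print(len(docs))
print(docs[0].metadata)  # entry metadata such as title and link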
from langchain_community.document_loaders import TextLoader from langchain_community.embeddings.sentence_transformer import ( SentenceTransformerEmbeddings, ) from langchain_community.vectorstores import Chroma from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2") db =
Chroma.from_documents(docs, embedding_function)
langchain_community.vectorstores.Chroma.from_documents
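A plausible next step for the Chroma store built above, mirroring the standard similarity-search pattern; `db` is the store from this row:

# Query the vector store built from the state-of-the-union chunks.
query = "What did the president say about Ketanji Brown Jackson"
results = db.similarity_search(query)  # returns the top-k (default 4) closest chunks
print(results[0].page_content)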
import os os.environ["SEARCHAPI_API_KEY"] = "" from langchain_community.utilities import SearchApiAPIWrapper search = SearchApiAPIWrapper() search.run("Obama's first name?") os.environ["OPENAI_API_KEY"] = "" from langchain.agents import AgentType, Tool, initialize_agent from langchain_community.utilities import SearchApiAPIWrapper from langchain_openai import OpenAI llm = OpenAI(temperature=0) search =
SearchApiAPIWrapper()
langchain_community.utilities.SearchApiAPIWrapper
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-robocorp') from langchain.agents import AgentExecutor, OpenAIFunctionsAgent from langchain_core.messages import SystemMessage from langchain_openai import ChatOpenAI from langchain_robocorp import ActionServerToolkit llm = ChatOpenAI(model="gpt-4", temperature=0) toolkit = ActionServerToolkit(url="http://localhost:8080", report_trace=True) tools = toolkit.get_tools() system_message = SystemMessage(content="You are a helpful assistant") prompt =
OpenAIFunctionsAgent.create_prompt(system_message)
langchain.agents.OpenAIFunctionsAgent.create_prompt
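A hedged sketch of how the created prompt is usually wired into a runnable agent; `llm`, `tools`, and `prompt` are the objects from this row, and the input question is illustrative:

from langchain.agents import AgentExecutor, OpenAIFunctionsAgent

# Combine model, prompt, and toolkit tools into an executable agent.
agent = OpenAIFunctionsAgent(llm=llm, prompt=prompt, tools=tools)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
executor.invoke({"input": "What actions are available on the action server?"})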
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic') import os import boto3 comprehend_client = boto3.client("comprehend", region_name="us-east-1") from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain comprehend_moderation = AmazonComprehendModerationChain( client=comprehend_client, verbose=True, # optional ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( ModerationPiiError, ) template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comprehend_moderation | {"input": (lambda x: x["output"]) | llm} | comprehend_moderation ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?" } ) except ModerationPiiError as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import ( BaseModerationConfig, ModerationPiiConfig, ModerationPromptSafetyConfig, ModerationToxicityConfig, ) pii_config =
ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
langchain_experimental.comprehend_moderation.ModerationPiiConfig
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-core langchain langchain-openai') from langchain.utils.math import cosine_similarity from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import PromptTemplate from langchain_core.runnables import RunnableLambda, RunnablePassthrough from langchain_openai import ChatOpenAI, OpenAIEmbeddings physics_template = """You are a very smart physics professor. \ You are great at answering questions about physics in a concise and easy to understand manner. \ When you don't know the answer to a question you admit that you don't know. Here is a question: {query}""" math_template = """You are a very good mathematician. You are great at answering math questions. \ You are so good because you are able to break down hard problems into their component parts, \ answer the component parts, and then put them together to answer the broader question. Here is a question: {query}""" embeddings = OpenAIEmbeddings() prompt_templates = [physics_template, math_template] prompt_embeddings = embeddings.embed_documents(prompt_templates) def prompt_router(input): query_embedding = embeddings.embed_query(input["query"]) similarity = cosine_similarity([query_embedding], prompt_embeddings)[0] most_similar = prompt_templates[similarity.argmax()] print("Using MATH" if most_similar == math_template else "Using PHYSICS") return
PromptTemplate.from_template(most_similar)
langchain_core.prompts.PromptTemplate.from_template
get_ipython().run_line_magic('reload_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') from datetime import datetime from langchain.agents import AgentType, initialize_agent from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit from langchain_community.utilities.clickup import ClickupAPIWrapper from langchain_openai import OpenAI oauth_client_id = "ABC..." oauth_client_secret = "123..." redirect_uri = "https://google.com" print("Click this link, select your workspace, click `Connect Workspace`") print(ClickupAPIWrapper.get_access_code_url(oauth_client_id, redirect_uri)) code = "THISISMYCODERIGHTHERE" access_token = ClickupAPIWrapper.get_access_token( oauth_client_id, oauth_client_secret, code ) clickup_api_wrapper =
ClickupAPIWrapper(access_token=access_token)
langchain_community.utilities.clickup.ClickupAPIWrapper
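A hedged follow-on showing how the wrapper typically feeds a toolkit and an agent; the zero-shot agent type is an assumption based on the ClickUp example flow:

from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
from langchain_openai import OpenAI

toolkit = ClickupToolkit.from_clickup_api_wrapper(clickup_api_wrapper)
llm = OpenAI(temperature=0)
agent = initialize_agent(
    toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)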
meals = [ "Beef Enchiladas with Feta cheese. Mexican-Greek fusion", "Chicken Flatbreads with red sauce. Italian-Mexican fusion", "Veggie sweet potato quesadillas with vegan cheese", "One-Pan Tortelonni bake with peppers and onions", ] from langchain_openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo-instruct") from langchain.prompts import PromptTemplate PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}". Embed the meal into the given text: "{text_to_personalize}". Prepend a personalized message including the user's name "{user}" and their preference "{preference}". Make it sound good. """ PROMPT = PromptTemplate( input_variables=["meal", "text_to_personalize", "user", "preference"], template=PROMPT_TEMPLATE, ) import langchain_experimental.rl_chain as rl_chain chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs \ believe you will love it!", ) print(response["response"]) for _ in range(5): try: response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) except Exception as e: print(e) print(response["response"]) print() scoring_criteria_template = ( "Given {preference} rank how good or bad this selection is {meal}" ) chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer( llm=llm, scoring_criteria_template_str=scoring_criteria_template ), ) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) print(response["response"]) selection_metadata = response["selection_metadata"] print( f"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}" ) class CustomSelectionScorer(rl_chain.SelectionScorer): def score_response( self, inputs, llm_response: str, event: rl_chain.PickBestEvent ) -> float: print(event.based_on) print(event.to_select_from) selected_meal = event.to_select_from["meal"][event.selected.index] print(f"selected meal: {selected_meal}") if "Tom" in event.based_on["user"]: if "Vegetarian" in event.based_on["preference"]: if "Chicken" in selected_meal or "Beef" in selected_meal: return 0.0 else: return 1.0 else: if "Chicken" in selected_meal or "Beef" in selected_meal: return 1.0 else: return 0.0 else: raise NotImplementedError("I don't know how to score this user") chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer(), ) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) class CustomSelectionScorer(rl_chain.SelectionScorer): def score_preference(self, preference, selected_meal): if "Vegetarian" in preference: if "Chicken" in selected_meal or "Beef" in selected_meal: return 0.0 else: return 1.0 else: if "Chicken" in selected_meal or "Beef" in selected_meal: return 1.0 else: return 0.0 def score_response( self, 
inputs, llm_response: str, event: rl_chain.PickBestEvent ) -> float: selected_meal = event.to_select_from["meal"][event.selected.index] if "Tom" in event.based_on["user"]: return self.score_preference(event.based_on["preference"], selected_meal) elif "Anna" in event.based_on["user"]: return self.score_preference(event.based_on["preference"], selected_meal) else: raise NotImplementedError("I don't know how to score this user") chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer(), metrics_step=5, metrics_window_size=5, # rolling window average ) random_chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer(), metrics_step=5, metrics_window_size=5, # rolling window average policy=rl_chain.PickBestRandomPolicy, # set the random policy instead of default ) for _ in range(20): try: chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) random_chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Anna"), preference=rl_chain.BasedOn(["Loves meat", "especially beef"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) random_chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Anna"), preference=rl_chain.BasedOn(["Loves meat", "especially beef"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) except Exception as e: print(e) from matplotlib import pyplot as plt chain.metrics.to_pandas()["score"].plot(label="default learning policy") random_chain.metrics.to_pandas()["score"].plot(label="random selection policy") plt.legend() print( f"The final average score for the default policy, calculated over a rolling window, is: {chain.metrics.to_pandas()['score'].iloc[-1]}" ) print( f"The final average score for the random policy, calculated over a rolling window, is: {random_chain.metrics.to_pandas()['score'].iloc[-1]}" ) from langchain.globals import set_debug from langchain.prompts.prompt import PromptTemplate set_debug(True) REWARD_PROMPT_TEMPLATE = """ Given {preference} rank how good or bad this selection is {meal} IMPORTANT: you MUST return a single number between -1 and 1, -1 being bad, 1 being good """ REWARD_PROMPT = PromptTemplate( input_variables=["preference", "meal"], template=REWARD_PROMPT_TEMPLATE, ) chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=
rl_chain.AutoSelectionScorer(llm=llm, prompt=REWARD_PROMPT)
langchain_experimental.rl_chain.AutoSelectionScorer
get_ipython().run_line_magic('pip', 'install --upgrade --quiet unstructured') from langchain_community.document_loaders import UnstructuredEmailLoader loader = UnstructuredEmailLoader("example_data/fake-email.eml") data = loader.load() data loader = UnstructuredEmailLoader("example_data/fake-email.eml", mode="elements") data = loader.load() data[0] loader = UnstructuredEmailLoader( "example_data/fake-email.eml", mode="elements", process_attachments=True, ) data = loader.load() data[0] get_ipython().run_line_magic('pip', 'install --upgrade --quiet extract_msg') from langchain_community.document_loaders import OutlookMessageLoader loader =
OutlookMessageLoader("example_data/fake-email.msg")
langchain_community.document_loaders.OutlookMessageLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints') import getpass import os if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): nvapi_key = getpass.getpass("Enter your NVIDIA API key: ") assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key" os.environ["NVIDIA_API_KEY"] = nvapi_key from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="mixtral_8x7b") result = llm.invoke("Write a ballad about LangChain.") print(result.content) print(llm.batch(["What's 2*3?", "What's 2*6?"])) for chunk in llm.stream("How far can a seagull fly in one day?"): print(chunk.content, end="|") async for chunk in llm.astream( "How long does it take for monarch butterflies to migrate?" ): print(chunk.content, end="|") ChatNVIDIA.get_available_models() from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")] ) chain = prompt | ChatNVIDIA(model="llama2_13b") | StrOutputParser() for txt in chain.stream({"input": "What's your name?"}): print(txt, end="") prompt = ChatPromptTemplate.from_messages( [ ( "system", "You are an expert coding AI. Respond only in valid python; no narration whatsoever.", ), ("user", "{input}"), ] ) chain = prompt | ChatNVIDIA(model="llama2_code_70b") | StrOutputParser() for txt in chain.stream({"input": "How do I solve this fizz buzz problem?"}): print(txt, end="") from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="nemotron_steerlm_8b") complex_result = llm.invoke( "What's a PB&J?", labels={"creativity": 0, "complexity": 3, "verbosity": 0} ) print("Un-creative\n") print(complex_result.content) print("\n\nCreative\n") creative_result = llm.invoke( "What's a PB&J?", labels={"creativity": 9, "complexity": 3, "verbosity": 9} ) print(creative_result.content) from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")] ) chain = ( prompt | ChatNVIDIA(model="nemotron_steerlm_8b").bind( labels={"creativity": 9, "complexity": 0, "verbosity": 9} ) | StrOutputParser() ) for txt in chain.stream({"input": "Why is a PB&J?"}): print(txt, end="") import IPython import requests image_url = "https://www.nvidia.com/content/dam/en-zz/Solutions/research/ai-playground/[email protected]" ## Large Image image_content = requests.get(image_url).content IPython.display.Image(image_content) from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="playground_neva_22b") from langchain_core.messages import HumanMessage llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, {"type": "image_url", "image_url": {"url": image_url}}, ] ) ] ) from langchain_core.messages import HumanMessage llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, {"type": "image_url", "image_url": {"url": image_url}}, ] ) ], labels={"creativity": 0, "quality": 9, "complexity": 0, "verbosity": 0}, ) import IPython import requests image_url = "https://picsum.photos/seed/kitten/300/200" image_content = requests.get(image_url).content IPython.display.Image(image_content) import base64 from 
langchain_core.messages import HumanMessage b64_string = base64.b64encode(image_content).decode("utf-8") llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, { "type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64_string}"}, }, ] ) ] ) base64_with_mime_type = f"data:image/png;base64,{b64_string}" llm.invoke(f'What\'s in this image?\n<img src="{base64_with_mime_type}" />') from langchain_nvidia_ai_endpoints import ChatNVIDIA kosmos = ChatNVIDIA(model="kosmos_2") from langchain_core.messages import HumanMessage def drop_streaming_key(d): """Takes in payload dictionary, outputs new payload dictionary""" if "stream" in d: d.pop("stream") return d kosmos = ChatNVIDIA(model="kosmos_2") kosmos.client.payload_fn = drop_streaming_key kosmos.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, {"type": "image_url", "image_url": {"url": image_url}}, ] ) ] ) import base64 from io import BytesIO from PIL import Image img_gen = ChatNVIDIA(model="sdxl_turbo") def to_sdxl_payload(d): if d: d = {"prompt": d.get("messages", [{}])[0].get("content")} d["inference_steps"] = 4 ## why not add another argument? return d img_gen.client.payload_fn = to_sdxl_payload def to_pil_img(d): return Image.open(BytesIO(base64.b64decode(d))) (img_gen | StrOutputParser() | to_pil_img).invoke("white cat playing") from langchain_core.messages import ChatMessage from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [ ChatMessage( role="context", content="Parrots and Cats have signed the peace accord." ), ("user", "{input}"), ] ) llm = ChatNVIDIA(model="nemotron_qa_8b") chain = prompt | llm | StrOutputParser() chain.invoke({"input": "What was signed?"}) get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain') from langchain.chains import ConversationChain from langchain.memory import ConversationBufferMemory chat =
ChatNVIDIA(model="mixtral_8x7b", temperature=0.1, max_tokens=100, top_p=1.0)
langchain_nvidia_ai_endpoints.ChatNVIDIA
get_ipython().run_line_magic('pip', 'install --upgrade --quiet alibabacloud_ha3engine_vector') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") from langchain_community.vectorstores import ( AlibabaCloudOpenSearch, AlibabaCloudOpenSearchSettings, ) from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter from langchain_community.document_loaders import TextLoader loader = TextLoader("../../../state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() settings = AlibabaCloudOpenSearchSettings( endpoint=" The endpoint of opensearch instance, You can find it from the console of Alibaba Cloud OpenSearch.", instance_id="The identify of opensearch instance, You can find it from the console of Alibaba Cloud OpenSearch.", protocol="Communication Protocol between SDK and Server, default is http.", username="The username specified when purchasing the instance.", password="The password specified when purchasing the instance.", namespace="The instance data will be partitioned based on the namespace field. If the namespace is enabled, you need to specify the namespace field name during initialization. Otherwise, the queries cannot be executed correctly.", tablename="The table name specified during instance configuration.", embedding_field_separator="Delimiter specified for writing vector field data, default is comma.", output_fields="Specify the field list returned when invoking OpenSearch, by default it is the value list of the field mapping field.", field_name_mapping={ "id": "id", # The id field name mapping of index document. "document": "document", # The text field name mapping of index document. "embedding": "embedding", # The embedding field name mapping of index document. "name_of_the_metadata_specified_during_search": "opensearch_metadata_field_name,=", }, ) opensearch = AlibabaCloudOpenSearch.from_texts( texts=docs, embedding=embeddings, config=settings ) opensearch =
AlibabaCloudOpenSearch(embedding=embeddings, config=settings)
langchain_community.vectorstores.AlibabaCloudOpenSearch
from typing import List from langchain.output_parsers import PydanticOutputParser from langchain_core.pydantic_v1 import BaseModel, Field from langchain_openai import ChatOpenAI class Actor(BaseModel): name: str = Field(description="name of an actor") film_names: List[str] = Field(description="list of names of films they starred in") actor_query = "Generate the filmography for a random actor." parser =
PydanticOutputParser(pydantic_object=Actor)
langchain.output_parsers.PydanticOutputParser
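A minimal, hedged sketch of the usual next steps with this parser: inject its format instructions into a prompt, then parse the model output back into an Actor (`parser` and `actor_query` come from the row):

from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | ChatOpenAI(temperature=0) | parser
chain.invoke({"query": actor_query})  # -> Actor(name=..., film_names=[...])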
get_ipython().system("python3 -m pip install --upgrade langchain 'deeplake[enterprise]' openai tiktoken") import getpass import os from langchain.chains import RetrievalQA from langchain_community.vectorstores import DeepLake from langchain_openai import OpenAI, OpenAIEmbeddings from langchain_text_splitters import ( CharacterTextSplitter, RecursiveCharacterTextSplitter, ) os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") activeloop_token = getpass.getpass("Activeloop Token:") os.environ["ACTIVELOOP_TOKEN"] = activeloop_token os.environ["ACTIVELOOP_ORG"] = getpass.getpass("Activeloop Org:") org_id = os.environ["ACTIVELOOP_ORG"] embeddings = OpenAIEmbeddings() dataset_path = "hub://" + org_id + "/data" with open("messages.txt") as f: state_of_the_union = f.read() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) pages = text_splitter.split_text(state_of_the_union) text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100) texts = text_splitter.create_documents(pages) print(texts) dataset_path = "hub://" + org_id + "/data" embeddings = OpenAIEmbeddings() db = DeepLake.from_documents( texts, embeddings, dataset_path=dataset_path, overwrite=True ) db =
DeepLake(dataset_path=dataset_path, read_only=True, embedding=embeddings)
langchain_community.vectorstores.DeepLake
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent from langchain.chains import LLMChain from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory from langchain.prompts import PromptTemplate from langchain_community.utilities import GoogleSearchAPIWrapper from langchain_openai import OpenAI template = """This is a conversation between a human and a bot: {chat_history} Write a summary of the conversation for {input}: """ prompt = PromptTemplate(input_variables=["input", "chat_history"], template=template) memory = ConversationBufferMemory(memory_key="chat_history") readonlymemory = ReadOnlySharedMemory(memory=memory) summary_chain = LLMChain( llm=OpenAI(), prompt=prompt, verbose=True, memory=readonlymemory, # use the read-only memory to prevent the tool from modifying the memory ) search = GoogleSearchAPIWrapper() tools = [ Tool( name="Search", func=search.run, description="useful for when you need to answer questions about current events", ), Tool( name="Summary", func=summary_chain.run, description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.", ), ] prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:""" suffix = """Begin!" {chat_history} Question: {input} {agent_scratchpad}""" prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix, input_variables=["input", "chat_history", "agent_scratchpad"], ) llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt) agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True) agent_chain = AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, verbose=True, memory=memory ) agent_chain.run(input="What is ChatGPT?") agent_chain.run(input="Who developed it?") agent_chain.run( input="Thanks. Summarize the conversation, for my daughter 5 years old." ) print(agent_chain.memory.buffer) template = """This is a conversation between a human and a bot: {chat_history} Write a summary of the conversation for {input}: """ prompt =
PromptTemplate(input_variables=["input", "chat_history"], template=template)
langchain.prompts.PromptTemplate
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-core databricks-vectorsearch langchain-openai tiktoken') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") from langchain_community.document_loaders import TextLoader from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() emb_dim = len(embeddings.embed_query("hello")) from databricks.vector_search.client import VectorSearchClient vsc = VectorSearchClient() vsc.create_endpoint(name="vector_search_demo_endpoint", endpoint_type="STANDARD") vector_search_endpoint_name = "vector_search_demo_endpoint" index_name = "ml.llm.demo_index" index = vsc.create_direct_access_index( endpoint_name=vector_search_endpoint_name, index_name=index_name, primary_key="id", embedding_dimension=emb_dim, embedding_vector_column="text_vector", schema={ "id": "string", "text": "string", "text_vector": "array<float>", "source": "string", }, ) index.describe() from langchain_community.vectorstores import DatabricksVectorSearch dvs = DatabricksVectorSearch( index, text_column="text", embedding=embeddings, columns=["source"] ) dvs.add_documents(docs) query = "What did the president say about Ketanji Brown Jackson" dvs.similarity_search(query) print(docs[0].page_content) dvs_delta_sync =
DatabricksVectorSearch("catalog_name.schema_name.delta_sync_index")
langchain_community.vectorstores.DatabricksVectorSearch
get_ipython().run_line_magic('reload_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') from datetime import datetime from langchain.agents import AgentType, initialize_agent from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit from langchain_community.utilities.clickup import ClickupAPIWrapper from langchain_openai import OpenAI oauth_client_id = "ABC..." oauth_client_secret = "123..." redirect_uri = "https://google.com" print("Click this link, select your workspace, click `Connect Workspace`") print(
ClickupAPIWrapper.get_access_code_url(oauth_client_id, redirect_uri)
langchain_community.utilities.clickup.ClickupAPIWrapper.get_access_code_url
meals = [ "Beef Enchiladas with Feta cheese. Mexican-Greek fusion", "Chicken Flatbreads with red sauce. Italian-Mexican fusion", "Veggie sweet potato quesadillas with vegan cheese", "One-Pan Tortelonni bake with peppers and onions", ] from langchain_openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo-instruct") from langchain.prompts import PromptTemplate PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}". Embed the meal into the given text: "{text_to_personalize}". Prepend a personalized message including the user's name "{user}" and their preference "{preference}". Make it sound good. """ PROMPT = PromptTemplate( input_variables=["meal", "text_to_personalize", "user", "preference"], template=PROMPT_TEMPLATE, ) import langchain_experimental.rl_chain as rl_chain chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs \ believe you will love it!", ) print(response["response"]) for _ in range(5): try: response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) except Exception as e: print(e) print(response["response"]) print() scoring_criteria_template = ( "Given {preference} rank how good or bad this selection is {meal}" ) chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer( llm=llm, scoring_criteria_template_str=scoring_criteria_template ), ) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) print(response["response"]) selection_metadata = response["selection_metadata"] print( f"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}" ) class CustomSelectionScorer(rl_chain.SelectionScorer): def score_response( self, inputs, llm_response: str, event: rl_chain.PickBestEvent ) -> float: print(event.based_on) print(event.to_select_from) selected_meal = event.to_select_from["meal"][event.selected.index] print(f"selected meal: {selected_meal}") if "Tom" in event.based_on["user"]: if "Vegetarian" in event.based_on["preference"]: if "Chicken" in selected_meal or "Beef" in selected_meal: return 0.0 else: return 1.0 else: if "Chicken" in selected_meal or "Beef" in selected_meal: return 1.0 else: return 0.0 else: raise NotImplementedError("I don't know how to score this user") chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer(), ) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=
rl_chain.BasedOn("Tom")
langchain_experimental.rl_chain.BasedOn
from langchain_community.graphs import NeptuneGraph host = "<neptune-host>" port = 8182 use_https = True graph =
NeptuneGraph(host=host, port=port, use_https=use_https)
langchain_community.graphs.NeptuneGraph
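A hedged sketch of what typically follows: pairing the graph with an openCypher QA chain (the model name and sample question are illustrative):

from langchain.chains import NeptuneOpenCypherQAChain
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(temperature=0, model="gpt-4")
# Generates an openCypher query against the Neptune graph, runs it,
# and has the LLM phrase the answer.
chain = NeptuneOpenCypherQAChain.from_llm(llm=llm, graph=graph)
chain.run("How many outgoing routes does the Austin airport have?")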
get_ipython().system('pip3 install cerebrium') import os from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.llms import CerebriumAI os.environ["CEREBRIUMAI_API_KEY"] = "YOUR_KEY_HERE" llm =
CerebriumAI(endpoint_url="YOUR ENDPOINT URL HERE")
langchain_community.llms.CerebriumAI
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 langchain-openai tiktoken python-dotenv') get_ipython().run_line_magic('pip', 'install --upgrade --quiet "amazon-textract-caller>=0.2.0"') from langchain_community.document_loaders import AmazonTextractPDFLoader loader = AmazonTextractPDFLoader("example_data/alejandro_rosalez_sample-small.jpeg") documents = loader.load() documents from langchain_community.document_loaders import AmazonTextractPDFLoader loader =
AmazonTextractPDFLoader( "https://amazon-textract-public-content.s3.us-east-2.amazonaws.com/langchain/alejandro_rosalez_sample_1.jpg" )
langchain_community.document_loaders.AmazonTextractPDFLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet trubrics') import os os.environ["TRUBRICS_EMAIL"] = "***@***" os.environ["TRUBRICS_PASSWORD"] = "***" os.environ["OPENAI_API_KEY"] = "sk-***" from langchain.callbacks import TrubricsCallbackHandler from langchain_openai import OpenAI llm = OpenAI(callbacks=[TrubricsCallbackHandler()]) res = llm.generate(["Tell me a joke", "Write me a poem"]) print("--> GPT's joke: ", res.generations[0][0].text) print() print("--> GPT's poem: ", res.generations[1][0].text) from langchain.callbacks import TrubricsCallbackHandler from langchain_core.messages import HumanMessage, SystemMessage from langchain_openai import ChatOpenAI chat_llm = ChatOpenAI( callbacks=[ TrubricsCallbackHandler( project="default", tags=["chat model"], user_id="user-id-1234", some_metadata={"hello": [1, 2]}, ) ] ) chat_res = chat_llm( [
SystemMessage(content="Every answer of yours must be about OpenAI.")
langchain_core.messages.SystemMessage
import os from langchain_community.utilities import OpenWeatherMapAPIWrapper os.environ["OPENWEATHERMAP_API_KEY"] = "" weather =
OpenWeatherMapAPIWrapper()
langchain_community.utilities.OpenWeatherMapAPIWrapper
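A hedged usage sketch for the wrapper above; the location string is an example:

# Fetch current conditions; returns a formatted text summary
# (temperature, humidity, wind, etc.).
weather_data = weather.run("London,GB")
print(weather_data)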
from langchain.callbacks.manager import CallbackManager from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.prompts import PromptTemplate from langchain_community.llms import TitanTakeoffPro llm = TitanTakeoffPro() output = llm("What is the weather in London in August?") print(output) llm = TitanTakeoffPro( base_url="http://localhost:3000", min_new_tokens=128, max_new_tokens=512, no_repeat_ngram_size=2, sampling_topk=1, sampling_topp=1.0, sampling_temperature=1.0, repetition_penalty=1.0, regex_string="", ) output = llm("What is the largest rainforest in the world?") print(output) llm =
TitanTakeoffPro()
langchain_community.llms.TitanTakeoffPro
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azureml-fsspec azure-ai-generative') from azure.ai.resources.client import AIClient from azure.identity import DefaultAzureCredential from langchain_community.document_loaders import AzureAIDataLoader client = AIClient( credential=DefaultAzureCredential(), subscription_id="<subscription_id>", resource_group_name="<resource_group_name>", project_name="<project_name>", ) data_asset = client.data.get(name="<data_asset_name>", label="latest") loader =
AzureAIDataLoader(url=data_asset.path)
langchain_community.document_loaders.AzureAIDataLoader
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-community') get_ipython().run_line_magic('pip', 'install --pre --upgrade bigdl-llm[all]') from langchain.chains import LLMChain from langchain_community.llms.bigdl import BigdlLLM from langchain_core.prompts import PromptTemplate template = "USER: {question}\nASSISTANT:" prompt =
PromptTemplate(template=template, input_variables=["question"])
langchain_core.prompts.PromptTemplate
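A hedged sketch of how this prompt is plausibly paired with a local BigDL-LLM model; the model id and kwargs follow the BigDL example notebook and are assumptions here (`BigdlLLM`, `LLMChain`, and `prompt` come from the row):

# Load a local model through BigDL-LLM and run the prompt through it.
llm = BigdlLLM.from_model_id(
    model_id="lmsys/vicuna-7b-v1.5",
    model_kwargs={"temperature": 0, "max_length": 64, "trust_remote_code": True},
)
llm_chain = LLMChain(prompt=prompt, llm=llm)
llm_chain.invoke({"question": "What is AI?"})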
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pyvespa') from vespa.package import ApplicationPackage, Field, RankProfile app_package = ApplicationPackage(name="testapp") app_package.schema.add_fields( Field( name="text", type="string", indexing=["index", "summary"], index="enable-bm25" ), Field( name="embedding", type="tensor<float>(x[384])", indexing=["attribute", "summary"], attribute=["distance-metric: angular"], ), ) app_package.schema.add_rank_profile( RankProfile( name="default", first_phase="closeness(field, embedding)", inputs=[("query(query_embedding)", "tensor<float>(x[384])")], ) ) from vespa.deployment import VespaDocker vespa_docker = VespaDocker() vespa_app = vespa_docker.deploy(application_package=app_package) from langchain_community.document_loaders import TextLoader from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) from langchain_community.embeddings.sentence_transformer import ( SentenceTransformerEmbeddings, ) embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2") vespa_config = dict( page_content_field="text", embedding_field="embedding", input_field="query_embedding", ) from langchain_community.vectorstores import VespaStore db = VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config) query = "What did the president say about Ketanji Brown Jackson" results = db.similarity_search(query) print(results[0].page_content) query = "What did the president say about Ketanji Brown Jackson" results = db.similarity_search(query) result = results[0] result.page_content = "UPDATED: " + result.page_content db.add_texts([result.page_content], [result.metadata], result.metadata["id"]) results = db.similarity_search(query) print(results[0].page_content) result = db.similarity_search(query) db.delete(["32"]) result = db.similarity_search(query) results = db.similarity_search_with_score(query) result = results[0] db = VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config) retriever = db.as_retriever() query = "What did the president say about Ketanji Brown Jackson" results = retriever.get_relevant_documents(query) app_package.schema.add_fields( Field(name="date", type="string", indexing=["attribute", "summary"]), Field(name="rating", type="int", indexing=["attribute", "summary"]), Field(name="author", type="string", indexing=["attribute", "summary"]), ) vespa_app = vespa_docker.deploy(application_package=app_package) for i, doc in enumerate(docs): doc.metadata["date"] = f"2023-{(i % 12)+1}-{(i % 28)+1}" doc.metadata["rating"] = range(1, 6)[i % 5] doc.metadata["author"] = ["Joe Biden", "Unknown"][min(i, 1)] vespa_config.update(dict(metadata_fields=["date", "rating", "author"])) db = VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config) query = "What did the president say about Ketanji Brown Jackson" results = db.similarity_search(query, filter="rating > 3") from vespa.package import FieldSet app_package.schema.add_field_set(FieldSet(name="default", fields=["text"])) app_package.schema.add_rank_profile(RankProfile(name="bm25", first_phase="bm25(text)")) vespa_app = vespa_docker.deploy(application_package=app_package) db =
VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config)
langchain_community.vectorstores.VespaStore.from_documents
get_ipython().run_line_magic('pip', 'install --upgrade --quiet networkx') from langchain.indexes import GraphIndexCreator from langchain_openai import OpenAI index_creator = GraphIndexCreator(llm=OpenAI(temperature=0)) with open("../../../modules/state_of_the_union.txt") as f: all_text = f.read() text = "\n".join(all_text.split("\n\n")[105:108]) text graph = index_creator.from_text(text) graph.get_triples() from langchain.chains import GraphQAChain chain = GraphQAChain.from_llm(
OpenAI(temperature=0)
langchain_openai.OpenAI
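The completion above stops inside the from_llm(...) call; a plausible finished form, following the graph-QA pattern (`graph` is the index built in the row):

from langchain.chains import GraphQAChain
from langchain_openai import OpenAI

chain = GraphQAChain.from_llm(OpenAI(temperature=0), graph=graph, verbose=True)
chain.run("what is Intel going to build?")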
from langchain.agents import load_tools requests_tools = load_tools(["requests_all"]) requests_tools requests_tools[0].requests_wrapper from langchain_community.utilities import TextRequestsWrapper requests = TextRequestsWrapper() requests.get("https://www.google.com") from langchain_community.utilities.requests import JsonRequestsWrapper requests =
JsonRequestsWrapper()
langchain_community.utilities.requests.JsonRequestsWrapper
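A hedged usage sketch: unlike TextRequestsWrapper, the JSON wrapper parses the response body into Python objects (the endpoint below is illustrative):

from langchain_community.utilities.requests import JsonRequestsWrapper

requests = JsonRequestsWrapper()
data = requests.get("https://api.github.com")  # parsed dict, not raw text
print(type(data))  # <class 'dict'>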
import os os.environ["SERPER_API_KEY"] = "" os.environ["OPENAI_API_KEY"] = "" from typing import Any, List from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) from langchain_community.utilities import GoogleSerperAPIWrapper from langchain_core.documents import Document from langchain_core.retrievers import BaseRetriever from langchain_openai import ChatOpenAI, OpenAI class SerperSearchRetriever(BaseRetriever): search: GoogleSerperAPIWrapper = None def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any ) -> List[Document]: return [Document(page_content=self.search.run(query))] async def _aget_relevant_documents( self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun, **kwargs: Any, ) -> List[Document]: raise NotImplementedError() retriever = SerperSearchRetriever(search=GoogleSerperAPIWrapper()) from langchain.globals import set_verbose
set_verbose(True)
langchain.globals.set_verbose
model_url = "http://localhost:5000" from langchain.chains import LLMChain from langchain.globals import set_debug from langchain.prompts import PromptTemplate from langchain_community.llms import TextGen
set_debug(True)
langchain.globals.set_debug
from langchain.agents import Tool from langchain_community.tools.file_management.read import ReadFileTool from langchain_community.tools.file_management.write import WriteFileTool from langchain_community.utilities import SerpAPIWrapper search = SerpAPIWrapper() tools = [ Tool( name="search", func=search.run, description="useful for when you need to answer questions about current events. You should ask targeted questions", ), WriteFileTool(), ReadFileTool(), ] from langchain.docstore import InMemoryDocstore from langchain_community.vectorstores import FAISS from langchain_openai import OpenAIEmbeddings embeddings_model = OpenAIEmbeddings() import faiss embedding_size = 1536 index = faiss.IndexFlatL2(embedding_size) vectorstore = FAISS(embeddings_model.embed_query, index,
InMemoryDocstore({})
langchain.docstore.InMemoryDocstore
from langchain.prompts.pipeline import PipelinePromptTemplate from langchain.prompts.prompt import PromptTemplate full_template = """{introduction} {example} {start}""" full_prompt = PromptTemplate.from_template(full_template) introduction_template = """You are impersonating {person}.""" introduction_prompt =
PromptTemplate.from_template(introduction_template)
langchain.prompts.prompt.PromptTemplate.from_template
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml') path = "/Users/rlm/Desktop/Papers/LLaVA/" from typing import Any from pydantic import BaseModel from unstructured.partition.pdf import partition_pdf raw_pdf_elements = partition_pdf( filename=path + "LLaVA.pdf", extract_images_in_pdf=True, infer_table_structure=True, chunking_strategy="by_title", max_characters=4000, new_after_n_chars=3800, combine_text_under_n_chars=2000, image_output_dir_path=path, ) category_counts = {} for element in raw_pdf_elements: category = str(type(element)) if category in category_counts: category_counts[category] += 1 else: category_counts[category] = 1 unique_categories = set(category_counts.keys()) category_counts class Element(BaseModel): type: str text: Any categorized_elements = [] for element in raw_pdf_elements: if "unstructured.documents.elements.Table" in str(type(element)): categorized_elements.append(Element(type="table", text=str(element))) elif "unstructured.documents.elements.CompositeElement" in str(type(element)): categorized_elements.append(Element(type="text", text=str(element))) table_elements = [e for e in categorized_elements if e.type == "table"] print(len(table_elements)) text_elements = [e for e in categorized_elements if e.type == "text"] print(len(text_elements)) from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI prompt_text = """You are an assistant tasked with summarizing tables and text. \ Give a concise summary of the table or text. Table or text chunk: {element} """ prompt = ChatPromptTemplate.from_template(prompt_text) model = ChatOpenAI(temperature=0, model="gpt-4") summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser() texts = [i.text for i in text_elements] text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5}) tables = [i.text for i in table_elements] table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5}) get_ipython().run_cell_magic('bash', '', '\n# Define the directory containing the images\nIMG_DIR=~/Desktop/Papers/LLaVA/\n\n# Loop through each image in the directory\nfor img in "${IMG_DIR}"*.jpg; do\n # Extract the base name of the image without extension\n base_name=$(basename "$img" .jpg)\n\n # Define the output file name based on the image name\n output_file="${IMG_DIR}${base_name}.txt"\n\n # Execute the command and save the output to the defined output file\n /Users/rlm/Desktop/Code/llama.cpp/bin/llava -m ../models/llava-7b/ggml-model-q5_k.gguf --mmproj ../models/llava-7b/mmproj-model-f16.gguf --temp 0.1 -p "Describe the image in detail. Be specific about graphs, such as bar plots." 
--image "$img" > "$output_file"\n\ndone\n') import glob import os file_paths = glob.glob(os.path.expanduser(os.path.join(path, "*.txt"))) img_summaries = [] for file_path in file_paths: with open(file_path, "r") as file: img_summaries.append(file.read()) logging_header = "clip_model_load: total allocated memory: 201.27 MB\n\n" cleaned_img_summary = [s.split(logging_header, 1)[1].strip() for s in img_summaries] import uuid from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import InMemoryStore from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_openai import OpenAIEmbeddings vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings()) store = InMemoryStore() id_key = "doc_id" retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key, ) doc_ids = [str(uuid.uuid4()) for _ in texts] summary_texts = [ Document(page_content=s, metadata={id_key: doc_ids[i]}) for i, s in enumerate(text_summaries) ] retriever.vectorstore.add_documents(summary_texts) retriever.docstore.mset(list(zip(doc_ids, texts))) table_ids = [str(uuid.uuid4()) for _ in tables] summary_tables = [ Document(page_content=s, metadata={id_key: table_ids[i]}) for i, s in enumerate(table_summaries) ] retriever.vectorstore.add_documents(summary_tables) retriever.docstore.mset(list(zip(table_ids, tables))) img_ids = [str(uuid.uuid4()) for _ in cleaned_img_summary] summary_img = [ Document(page_content=s, metadata={id_key: img_ids[i]}) for i, s in enumerate(cleaned_img_summary) ] retriever.vectorstore.add_documents(summary_img) retriever.docstore.mset(list(zip(img_ids, cleaned_img_summary))) img_ids = [str(uuid.uuid4()) for _ in cleaned_img_summary] summary_img = [ Document(page_content=s, metadata={id_key: img_ids[i]}) for i, s in enumerate(cleaned_img_summary) ] retriever.vectorstore.add_documents(summary_img) retriever.docstore.mset( list( zip( img_ids, ) ) ) tables[2] table_summaries[2] retriever.get_relevant_documents( "What are results for LLaMA across across domains / subjects?" )[1] retriever.get_relevant_documents("Images / figures with playful and creative examples")[ 1 ] from langchain_core.runnables import RunnablePassthrough template = """Answer the question based only on the following context, which can include text and tables: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template) model = ChatOpenAI(temperature=0, model="gpt-4") chain = ( {"context": retriever, "question":
RunnablePassthrough()
langchain_core.runnables.RunnablePassthrough
from langchain.prompts import ( ChatPromptTemplate, FewShotChatMessagePromptTemplate, ) examples = [ {"input": "2+2", "output": "4"}, {"input": "2+3", "output": "5"}, ] example_prompt =
ChatPromptTemplate.from_messages( [ ("human", "{input}")
langchain.prompts.ChatPromptTemplate.from_messages
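The completion above is cut off inside from_messages; a plausible completed few-shot setup, following the standard pattern for these two classes (`examples` comes from the row):

example_prompt = ChatPromptTemplate.from_messages(
    [("human", "{input}"), ("ai", "{output}")]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
    example_prompt=example_prompt,
    examples=examples,
)
print(few_shot_prompt.format())  # renders the 2+2 / 2+3 exchanges as messages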
get_ipython().system(' pip install langchain replicate') from langchain_community.chat_models import ChatOllama llama2_chat =
ChatOllama(model="llama2:13b-chat")
langchain_community.chat_models.ChatOllama
from langchain.memory import ConversationKGMemory from langchain_openai import OpenAI llm = OpenAI(temperature=0) memory = ConversationKGMemory(llm=llm) memory.save_context({"input": "say hi to sam"}, {"output": "who is sam"}) memory.save_context({"input": "sam is a friend"}, {"output": "okay"}) memory.load_memory_variables({"input": "who is sam"}) memory =
ConversationKGMemory(llm=llm, return_messages=True)
langchain.memory.ConversationKGMemory
from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) from langchain_community.chat_models import JinaChat from langchain_core.messages import HumanMessage, SystemMessage chat = JinaChat(temperature=0) messages = [ SystemMessage( content="You are a helpful assistant that translates English to French." ), HumanMessage( content="Translate this sentence from English to French. I love programming." ), ] chat(messages) template = ( "You are a helpful assistant that translates {input_language} to {output_language}." ) system_message_prompt =
SystemMessagePromptTemplate.from_template(template)
langchain.prompts.chat.SystemMessagePromptTemplate.from_template
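A hedged continuation assembling the full chat prompt and invoking JinaChat, mirroring the translation example earlier in the row (`chat` and `system_message_prompt` come from the row):

human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages(
    [system_message_prompt, human_message_prompt]
)
chat(
    chat_prompt.format_prompt(
        input_language="English", output_language="French", text="I love programming."
    ).to_messages()
)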
from langchain.indexes import SQLRecordManager, index from langchain_core.documents import Document from langchain_elasticsearch import ElasticsearchStore from langchain_openai import OpenAIEmbeddings collection_name = "test_index" embedding = OpenAIEmbeddings() vectorstore = ElasticsearchStore( es_url="http://localhost:9200", index_name="test_index", embedding=embedding ) namespace = f"elasticsearch/{collection_name}" record_manager = SQLRecordManager( namespace, db_url="sqlite:///record_manager_cache.sql" ) record_manager.create_schema() doc1 = Document(page_content="kitty", metadata={"source": "kitty.txt"}) doc2 = Document(page_content="doggy", metadata={"source": "doggy.txt"}) def _clear(): """Hacky helper method to clear content. See the `full` mode section to understand why it works.""" index([], record_manager, vectorstore, cleanup="full", source_id_key="source") _clear() index( [doc1, doc1, doc1, doc1, doc1], record_manager, vectorstore, cleanup=None, source_id_key="source", ) _clear()
index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")
langchain.indexes.index
from langchain.agents import AgentExecutor, BaseMultiActionAgent, Tool from langchain_community.utilities import SerpAPIWrapper def random_word(query: str) -> str: print("\nNow I'm doing this!") return "foo" search = SerpAPIWrapper() tools = [ Tool( name="Search", func=search.run, description="useful for when you need to answer questions about current events", ), Tool( name="RandomWord", func=random_word, description="call this to get a random word.", ), ] from typing import Any, List, Tuple, Union from langchain_core.agents import AgentAction, AgentFinish class FakeAgent(BaseMultiActionAgent): """Fake Custom Agent.""" @property def input_keys(self): return ["input"] def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any ) -> Union[List[AgentAction], AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations **kwargs: User inputs. Returns: Action specifying what tool to use. """ if len(intermediate_steps) == 0: return [ AgentAction(tool="Search", tool_input=kwargs["input"], log=""), AgentAction(tool="RandomWord", tool_input=kwargs["input"], log=""), ] else: return AgentFinish(return_values={"output": "bar"}, log="") async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any ) -> Union[List[AgentAction], AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations **kwargs: User inputs. Returns: Action specifying what tool to use. """ if len(intermediate_steps) == 0: return [ AgentAction(tool="Search", tool_input=kwargs["input"], log=""), AgentAction(tool="RandomWord", tool_input=kwargs["input"], log=""), ] else: return
AgentFinish(return_values={"output": "bar"}, log="")
langchain_core.agents.AgentFinish
import warnings warnings.filterwarnings("ignore") import os from langchain.agents.agent_toolkits import create_conversational_retrieval_agent from langchain_community.agent_toolkits import CogniswitchToolkit from langchain_openai import ChatOpenAI cs_token = "Your CogniSwitch token" OAI_token = "Your OpenAI API token" oauth_token = "Your CogniSwitch authentication token" os.environ["OPENAI_API_KEY"] = OAI_token cogniswitch_toolkit = CogniswitchToolkit( cs_token=cs_token, OAI_token=OAI_token, apiKey=oauth_token ) tool_lst = cogniswitch_toolkit.get_tools() llm = ChatOpenAI( temperature=0, openai_api_key=OAI_token, max_tokens=1500, model_name="gpt-3.5-turbo-0613", ) agent_executor =
create_conversational_retrieval_agent(llm, tool_lst, verbose=False)
langchain.agents.agent_toolkits.create_conversational_retrieval_agent
get_ipython().run_line_magic('pip', 'install -qU langchain-anthropic defusedxml') from langchain_anthropic.experimental import ChatAnthropicTools from langchain_core.pydantic_v1 import BaseModel class Person(BaseModel): name: str age: int model =
ChatAnthropicTools(model="claude-3-opus-20240229")
langchain_anthropic.experimental.ChatAnthropicTools
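A hedged sketch of the typical next step with this experimental class: binding the Person schema for structured output (the sample sentence is illustrative):

# Force the model's reply into the Person schema via tool calling.
chain = model.with_structured_output(Person)
chain.invoke("I am a 27 year old named Erick")  # -> Person(name='Erick', age=27)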
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic') import os import boto3 comprehend_client = boto3.client("comprehend", region_name="us-east-1") from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain comprehend_moderation = AmazonComprehendModerationChain( client=comprehend_client, verbose=True, # optional ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( ModerationPiiError, ) template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comprehend_moderation | {"input": (lambda x: x["output"]) | llm} | comprehend_moderation ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?" } ) except ModerationPiiError as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import ( BaseModerationConfig, ModerationPiiConfig, ModerationPromptSafetyConfig, ModerationToxicityConfig, ) pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5) moderation_config = BaseModerationConfig( filters=[pii_config, toxicity_config, prompt_safety_config] ) comp_moderation_with_config = AmazonComprehendModerationChain( moderation_config=moderation_config, # specify the configuration client=comprehend_client, # optionally pass the Boto3 Client verbose=True, ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comp_moderation_with_config | {"input": (lambda x: x["output"]) | llm} | comp_moderation_with_config ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?" 
} ) except Exception as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler class MyModCallback(BaseModerationCallbackHandler): async def on_after_pii(self, output_beacon, unique_id): import json moderation_type = output_beacon["moderation_type"] chain_id = output_beacon["moderation_chain_id"] with open(f"output-{moderation_type}-{chain_id}.json", "w") as file: data = {"beacon_data": output_beacon, "unique_id": unique_id} json.dump(data, file) """ async def on_after_toxicity(self, output_beacon, unique_id): pass async def on_after_prompt_safety(self, output_beacon, unique_id): pass """ my_callback = MyModCallback() pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config]) comp_moderation_with_config = AmazonComprehendModerationChain( moderation_config=moderation_config, # specify the configuration client=comprehend_client, # optionally pass the Boto3 Client unique_id="[email protected]", # A unique ID moderation_callback=my_callback, # BaseModerationCallbackHandler verbose=True, ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comp_moderation_with_config | {"input": (lambda x: x["output"]) | llm} | comp_moderation_with_config ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-456-7890. Can you give me some more samples?" } ) except Exception as e: print(str(e)) else: print(response["output"]) get_ipython().run_line_magic('pip', 'install --upgrade --quiet huggingface_hub') import os os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<YOUR HF TOKEN HERE>" repo_id = "google/flan-t5-xxl" from langchain.prompts import PromptTemplate from langchain_community.llms import HuggingFaceHub template = """{question}""" prompt = PromptTemplate.from_template(template) llm = HuggingFaceHub( repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 256} ) pii_config = ModerationPiiConfig( labels=["SSN", "CREDIT_DEBIT_NUMBER"], redact=True, mask_character="X" ) toxicity_config =
ModerationToxicityConfig(threshold=0.5)
langchain_experimental.comprehend_moderation.ModerationToxicityConfig
get_ipython().run_line_magic('pip', 'install --upgrade --quiet amadeus > /dev/null') import os os.environ["AMADEUS_CLIENT_ID"] = "CLIENT_ID" os.environ["AMADEUS_CLIENT_SECRET"] = "CLIENT_SECRET" os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY" from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit toolkit = AmadeusToolkit() tools = toolkit.get_tools() from langchain_community.llms import HuggingFaceHub os.environ["HUGGINGFACEHUB_API_TOKEN"] = "YOUR_HF_API_TOKEN" llm = HuggingFaceHub( repo_id="tiiuae/falcon-7b-instruct", model_kwargs={"temperature": 0.5, "max_length": 64}, ) toolkit_hf =
AmadeusToolkit(llm=llm)
langchain_community.agent_toolkits.amadeus.toolkit.AmadeusToolkit
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"} get_ipython().run_line_magic('pip', 'install -upgrade --quiet langchain-google-firestore') PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') from google.colab import auth auth.authenticate_user() get_ipython().system('gcloud services enable firestore.googleapis.com') from langchain_core.documents.base import Document from langchain_google_firestore import FirestoreSaver saver = FirestoreSaver() data = [Document(page_content="Hello, World!")] saver.upsert_documents(data) saver = FirestoreSaver("Collection") saver.upsert_documents(data) doc_ids = ["AnotherCollection/doc_id", "foo/bar"] saver = FirestoreSaver() saver.upsert_documents(documents=data, document_ids=doc_ids) from langchain_google_firestore import FirestoreLoader loader_collection = FirestoreLoader("Collection") loader_subcollection = FirestoreLoader("Collection/doc/SubCollection") data_collection = loader_collection.load() data_subcollection = loader_subcollection.load() from google.cloud import firestore client = firestore.Client() doc_ref = client.collection("foo").document("bar") loader_document = FirestoreLoader(doc_ref) data = loader_document.load() from google.cloud.firestore import CollectionGroup, FieldFilter, Query col_ref = client.collection("col_group") collection_group = CollectionGroup(col_ref) loader_group =
FirestoreLoader(collection_group)
langchain_google_firestore.FirestoreLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy') from langchain_community.embeddings.spacy_embeddings import SpacyEmbeddings embedder =
SpacyEmbeddings(model_name="en_core_web_sm")
langchain_community.embeddings.spacy_embeddings.SpacyEmbeddings
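A minimal usage sketch for the embedder above, assuming the standard LangChain Embeddings interface (embed_documents and embed_query); the sample texts are hypothetical.
from langchain_community.embeddings.spacy_embeddings import SpacyEmbeddings

embedder = SpacyEmbeddings(model_name="en_core_web_sm")
texts = ["pizza is great", "I love salad"]  # hypothetical sample texts
doc_vectors = embedder.embed_documents(texts)  # one vector per input text
query_vector = embedder.embed_query("What food do you like?")
print(len(doc_vectors), len(query_vector))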
get_ipython().run_line_magic('pip', 'install --upgrade --quiet apify-client langchain-openai langchain chromadb tiktoken') from langchain.indexes import VectorstoreIndexCreator from langchain_community.document_loaders.base import Document from langchain_community.utilities import ApifyWrapper import os os.environ["OPENAI_API_KEY"] = "Your OpenAI API key" os.environ["APIFY_API_TOKEN"] = "Your Apify API token" apify =
ApifyWrapper()
langchain_community.utilities.ApifyWrapper
REGION = "us-central1" # @param {type:"string"} INSTANCE = "test-instance" # @param {type:"string"} DB_USER = "sqlserver" # @param {type:"string"} DB_PASS = "password" # @param {type:"string"} DATABASE = "test" # @param {type:"string"} TABLE_NAME = "test-default" # @param {type:"string"} get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-cloud-sql-mssql') from google.colab import auth auth.authenticate_user() PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') get_ipython().system('gcloud services enable sqladmin.googleapis.com') from langchain_google_cloud_sql_mssql import MSSQLEngine engine = MSSQLEngine.from_instance( project_id=PROJECT_ID, region=REGION, instance=INSTANCE, database=DATABASE, user=DB_USER, password=DB_PASS, ) engine.init_document_table(TABLE_NAME, overwrite_existing=True) from langchain_core.documents import Document from langchain_google_cloud_sql_mssql import MSSQLDocumentSaver test_docs = [ Document( page_content="Apple Granny Smith 150 0.99 1", metadata={"fruit_id": 1}, ), Document( page_content="Banana Cavendish 200 0.59 0", metadata={"fruit_id": 2}, ), Document( page_content="Orange Navel 80 1.29 1", metadata={"fruit_id": 3}, ), ] saver = MSSQLDocumentSaver(engine=engine, table_name=TABLE_NAME) saver.add_documents(test_docs) from langchain_google_cloud_sql_mssql import MSSQLLoader loader =
MSSQLLoader(engine=engine, table_name=TABLE_NAME)
langchain_google_cloud_sql_mssql.MSSQLLoader
from langchain.chains import ConversationChain from langchain.memory import ( CombinedMemory, ConversationBufferMemory, ConversationSummaryMemory, ) from langchain.prompts import PromptTemplate from langchain_openai import OpenAI conv_memory = ConversationBufferMemory( memory_key="chat_history_lines", input_key="input" ) summary_memory = ConversationSummaryMemory(llm=OpenAI(), input_key="input") memory = CombinedMemory(memories=[conv_memory, summary_memory]) _DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. Summary of conversation: {history} Current conversation: {chat_history_lines} Human: {input} AI:""" PROMPT = PromptTemplate( input_variables=["history", "input", "chat_history_lines"], template=_DEFAULT_TEMPLATE, ) llm = OpenAI(temperature=0) conversation =
ConversationChain(llm=llm, verbose=True, memory=memory, prompt=PROMPT)
langchain.chains.ConversationChain
from langchain.chains import RetrievalQA from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import Chroma from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../state_of_the_union.txt", encoding="utf-8") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(documents) for i, text in enumerate(texts): text.metadata["source"] = f"{i}-pl" embeddings = OpenAIEmbeddings() docsearch =
Chroma.from_documents(texts, embeddings)
langchain_community.vectorstores.Chroma.from_documents
get_ipython().run_line_magic('pip', 'install --upgrade --quiet opaqueprompts langchain') import os os.environ["OPAQUEPROMPTS_API_KEY"] = "<OPAQUEPROMPTS_API_KEY>" os.environ["OPENAI_API_KEY"] = "<OPENAI_API_KEY>" from langchain.callbacks.stdout import StdOutCallbackHandler from langchain.chains import LLMChain from langchain.globals import set_debug, set_verbose from langchain.memory import ConversationBufferWindowMemory from langchain.prompts import PromptTemplate from langchain_community.llms import OpaquePrompts from langchain_openai import OpenAI set_debug(True) set_verbose(True) prompt_template = """ As an AI assistant, you will answer questions according to given context. Sensitive personal information in the question is masked for privacy. For instance, if the original text says "Giana is good," it will be changed to "PERSON_998 is good." Here's how to handle these changes: * Consider these masked phrases just as placeholders, but still refer to them in a relevant way when answering. * It's possible that different masked terms might mean the same thing. Stick with the given term and don't modify it. * All masked terms follow the "TYPE_ID" pattern. * Please don't invent new masked terms. For instance, if you see "PERSON_998," don't come up with "PERSON_997" or "PERSON_999" unless they're already in the question. Conversation History: ```{history}``` Context : ```During our recent meeting on February 23, 2023, at 10:30 AM, John Doe provided me with his personal details. His email is [email protected] and his contact number is 650-456-7890. He lives in New York City, USA, and belongs to the American nationality with Christian beliefs and a leaning towards the Democratic party. He mentioned that he recently made a transaction using his credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address 1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he noted down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided his website as https://johndoeportfolio.com. John also discussed some of his US-specific details. He said his bank account number is 1234567890123456 and his drivers license is Y12345678. His ITIN is 987-65-4321, and he recently renewed his passport, the number for which is 123456789. He emphasized not to share his SSN, which is 123-45-6789. Furthermore, he mentioned that he accesses his work files remotely through the IP 192.168.1.1 and has a medical license number MED-123456. ``` Question: ```{question}``` """ chain = LLMChain( prompt=PromptTemplate.from_template(prompt_template), llm=OpaquePrompts(base_llm=OpenAI()), memory=ConversationBufferWindowMemory(k=2), verbose=True, ) print( chain.run( { "question": """Write a message to remind John to do password reset for his website to stay secure.""" }, callbacks=[
StdOutCallbackHandler()
langchain.callbacks.stdout.StdOutCallbackHandler
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic') import os import boto3 comprehend_client = boto3.client("comprehend", region_name="us-east-1") from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain comprehend_moderation = AmazonComprehendModerationChain( client=comprehend_client, verbose=True, # optional ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( ModerationPiiError, ) template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comprehend_moderation | {"input": (lambda x: x["output"]) | llm} | comprehend_moderation ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?" } ) except ModerationPiiError as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import ( BaseModerationConfig, ModerationPiiConfig, ModerationPromptSafetyConfig, ModerationToxicityConfig, ) pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5) moderation_config = BaseModerationConfig( filters=[pii_config, toxicity_config, prompt_safety_config] ) comp_moderation_with_config = AmazonComprehendModerationChain( moderation_config=moderation_config, # specify the configuration client=comprehend_client, # optionally pass the Boto3 Client verbose=True, ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comp_moderation_with_config | {"input": (lambda x: x["output"]) | llm} | comp_moderation_with_config ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?" 
} ) except Exception as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler class MyModCallback(BaseModerationCallbackHandler): async def on_after_pii(self, output_beacon, unique_id): import json moderation_type = output_beacon["moderation_type"] chain_id = output_beacon["moderation_chain_id"] with open(f"output-{moderation_type}-{chain_id}.json", "w") as file: data = {"beacon_data": output_beacon, "unique_id": unique_id} json.dump(data, file) """ async def on_after_toxicity(self, output_beacon, unique_id): pass async def on_after_prompt_safety(self, output_beacon, unique_id): pass """ my_callback = MyModCallback() pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config]) comp_moderation_with_config = AmazonComprehendModerationChain( moderation_config=moderation_config, # specify the configuration client=comprehend_client, # optionally pass the Boto3 Client unique_id="[email protected]", # A unique ID moderation_callback=my_callback, # BaseModerationCallbackHandler verbose=True, ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comp_moderation_with_config | {"input": (lambda x: x["output"]) | llm} | comp_moderation_with_config ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-456-7890. Can you give me some more samples?" 
} ) except Exception as e: print(str(e)) else: print(response["output"]) get_ipython().run_line_magic('pip', 'install --upgrade --quiet huggingface_hub') import os os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<YOUR HF TOKEN HERE>" repo_id = "google/flan-t5-xxl" from langchain.prompts import PromptTemplate from langchain_community.llms import HuggingFaceHub template = """{question}""" prompt = PromptTemplate.from_template(template) llm = HuggingFaceHub( repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 256} ) pii_config = ModerationPiiConfig( labels=["SSN", "CREDIT_DEBIT_NUMBER"], redact=True, mask_character="X" ) toxicity_config = ModerationToxicityConfig(threshold=0.5) prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.8) moderation_config_1 = BaseModerationConfig( filters=[pii_config, toxicity_config, prompt_safety_config] ) moderation_config_2 = BaseModerationConfig(filters=[pii_config]) amazon_comp_moderation = AmazonComprehendModerationChain( moderation_config=moderation_config_1, client=comprehend_client, moderation_callback=my_callback, verbose=True, ) amazon_comp_moderation_out = AmazonComprehendModerationChain( moderation_config=moderation_config_2, client=comprehend_client, verbose=True ) chain = ( prompt | amazon_comp_moderation | {"input": (lambda x: x["output"]) | llm} | amazon_comp_moderation_out ) try: response = chain.invoke( { "question": """What is John Doe's address, phone number and SSN from the following text? John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at [email protected] reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place. """ } ) except Exception as e: print(str(e)) else: print(response["output"]) endpoint_name = "<SAGEMAKER_ENDPOINT_NAME>" # replace with your SageMaker Endpoint name region = "<REGION>" # replace with your SageMaker Endpoint region import json from langchain.prompts import PromptTemplate from langchain_community.llms import SagemakerEndpoint from langchain_community.llms.sagemaker_endpoint import LLMContentHandler class ContentHandler(LLMContentHandler): content_type = "application/json" accepts = "application/json" def transform_input(self, prompt: str, model_kwargs: dict) -> bytes: input_str = json.dumps({"text_inputs": prompt, **model_kwargs}) return input_str.encode("utf-8") def transform_output(self, output: bytes) -> str: response_json = json.loads(output.read().decode("utf-8")) return response_json["generated_texts"][0] content_handler = ContentHandler() template = """From the following 'Document', precisely answer the 'Question'. Do not add any spurious information in your answer. Document: John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at [email protected] reminding him of an old acquaintance's reunion. 
As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place. Question: {question} Answer: """ llm_prompt = PromptTemplate.from_template(template) llm = SagemakerEndpoint( endpoint_name=endpoint_name, region_name=region, model_kwargs={ "temperature": 0.95, "max_length": 200, "num_return_sequences": 3, "top_k": 50, "top_p": 0.95, "do_sample": True, }, content_handler=content_handler, ) pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) moderation_config_1 = BaseModerationConfig(filters=[pii_config, toxicity_config]) moderation_config_2 =
BaseModerationConfig(filters=[pii_config])
langchain_experimental.comprehend_moderation.BaseModerationConfig
get_ipython().run_line_magic('pip', 'install --upgrade --quiet promptlayer') import promptlayer # Don't forget this 🍰 from langchain.callbacks import PromptLayerCallbackHandler from langchain.schema import ( HumanMessage, ) from langchain_openai import ChatOpenAI chat_llm = ChatOpenAI( temperature=0, callbacks=[PromptLayerCallbackHandler(pl_tags=["chatopenai"])], ) llm_results = chat_llm( [ HumanMessage(content="What comes after 1,2,3 ?"), HumanMessage(content="Tell me another joke?"), ] ) print(llm_results) import promptlayer # Don't forget this 🍰 from langchain.callbacks import PromptLayerCallbackHandler from langchain_community.llms import GPT4All model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8) response = model( "Once upon a time, ", callbacks=[
PromptLayerCallbackHandler(pl_tags=["langchain", "gpt4all"])
langchain.callbacks.PromptLayerCallbackHandler
get_ipython().run_line_magic('pip', 'install --upgrade --quiet dashvector dashscope') import getpass import os os.environ["DASHVECTOR_API_KEY"] = getpass.getpass("DashVector API Key:") os.environ["DASHSCOPE_API_KEY"] = getpass.getpass("DashScope API Key:") from langchain_community.embeddings.dashscope import DashScopeEmbeddings from langchain_community.vectorstores import DashVector from langchain_text_splitters import CharacterTextSplitter from langchain_community.document_loaders import TextLoader loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings =
DashScopeEmbeddings()
langchain_community.embeddings.dashscope.DashScopeEmbeddings
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3') from langchain.retrievers import AmazonKendraRetriever retriever =
AmazonKendraRetriever(index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03")
langchain.retrievers.AmazonKendraRetriever
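A hedged sketch of querying the Kendra retriever above via the standard retriever interface; the question is hypothetical and the index id is the placeholder from the snippet.
from langchain.retrievers import AmazonKendraRetriever

retriever = AmazonKendraRetriever(index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03")
docs = retriever.get_relevant_documents("What is Amazon Kendra?")  # hypothetical query
for doc in docs:
    print(doc.page_content[:100])  # snippet of each retrieved passage
    print(doc.metadata)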
get_ipython().run_line_magic('pip', 'install --upgrade --quiet annoy') from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.vectorstores import Annoy embeddings_func = HuggingFaceEmbeddings() texts = ["pizza is great", "I love salad", "my car", "a dog"] vector_store = Annoy.from_texts(texts, embeddings_func) vector_store_v2 = Annoy.from_texts( texts, embeddings_func, metric="dot", n_trees=100, n_jobs=1 ) vector_store.similarity_search("food", k=3) vector_store.similarity_search_with_score("food", k=3) from langchain_community.document_loaders import TextLoader from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) docs[:5] vector_store_from_docs =
Annoy.from_documents(docs, embeddings_func)
langchain_community.vectorstores.Annoy.from_documents
from langchain_community.document_loaders import GutenbergLoader loader =
GutenbergLoader("https://www.gutenberg.org/cache/epub/69972/pg69972.txt")
langchain_community.document_loaders.GutenbergLoader
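For illustration, the loader above would typically be driven like this; load() returns a list of Document objects with page_content and metadata.
from langchain_community.document_loaders import GutenbergLoader

loader = GutenbergLoader("https://www.gutenberg.org/cache/epub/69972/pg69972.txt")
data = loader.load()
print(data[0].page_content[:200])  # opening characters of the book text
print(data[0].metadata)  # e.g. the source URL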
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub langchain-openai faiss-cpu') from langchain_community.document_loaders import TextLoader loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() from langchain_community.vectorstores import FAISS from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() db =
FAISS.from_documents(texts, embeddings)
langchain_community.vectorstores.FAISS.from_documents
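A short sketch of how the FAISS store built above is usually queried and persisted (texts and embeddings as prepared in the snippet); note that recent langchain-community releases may additionally require allow_dangerous_deserialization=True on load_local.
from langchain_community.vectorstores import FAISS

# assumes texts and embeddings as prepared in the snippet above
db = FAISS.from_documents(texts, embeddings)
docs = db.similarity_search("What did the president say about Ketanji Brown Jackson")
print(docs[0].page_content)
db.save_local("faiss_index")  # persist index + docstore to disk
new_db = FAISS.load_local("faiss_index", embeddings)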
get_ipython().run_line_magic('pip', 'install --upgrade --quiet hdbcli') import os from hdbcli import dbapi connection = dbapi.connect( address=os.environ.get("HANA_DB_ADDRESS"), port=os.environ.get("HANA_DB_PORT"), user=os.environ.get("HANA_DB_USER"), password=os.environ.get("HANA_DB_PASSWORD"), autocommit=True, sslValidateCertificate=False, ) from langchain.docstore.document import Document from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores.hanavector import HanaDB from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter text_documents = TextLoader("../../modules/state_of_the_union.txt").load() text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0) text_chunks = text_splitter.split_documents(text_documents) print(f"Number of document chunks: {len(text_chunks)}") embeddings = OpenAIEmbeddings() db = HanaDB( embedding=embeddings, connection=connection, table_name="STATE_OF_THE_UNION" ) db.delete(filter={}) db.add_documents(text_chunks) query = "What did the president say about Ketanji Brown Jackson" docs = db.similarity_search(query, k=2) for doc in docs: print("-" * 80) print(doc.page_content) from langchain_community.vectorstores.utils import DistanceStrategy db = HanaDB( embedding=embeddings, connection=connection, distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name="STATE_OF_THE_UNION", ) query = "What did the president say about Ketanji Brown Jackson" docs = db.similarity_search(query, k=2) for doc in docs: print("-" * 80) print(doc.page_content) docs = db.max_marginal_relevance_search(query, k=2, fetch_k=20) for doc in docs: print("-" * 80) print(doc.page_content) db = HanaDB( connection=connection, embedding=embeddings, table_name="LANGCHAIN_DEMO_BASIC" ) db.delete(filter={}) docs = [Document(page_content="Some text"),
Document(page_content="Other docs")
langchain.docstore.document.Document
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai') get_ipython().run_line_magic('pip', 'install --upgrade --quiet psycopg2-binary') get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken') YBUSER = "[SANDBOX USER]" YBPASSWORD = "[SANDBOX PASSWORD]" YBDATABASE = "[SANDBOX_DATABASE]" YBHOST = "trialsandbox.sandbox.aws.yellowbrickcloud.com" OPENAI_API_KEY = "[OPENAI API KEY]" import os import pathlib import re import sys import urllib.parse as urlparse from getpass import getpass import psycopg2 from IPython.display import Markdown, display from langchain.chains import LLMChain, RetrievalQAWithSourcesChain from langchain.docstore.document import Document from langchain_community.vectorstores import Yellowbrick from langchain_openai import ChatOpenAI, OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter yellowbrick_connection_string = ( f"postgres://{urlparse.quote(YBUSER)}:{YBPASSWORD}@{YBHOST}:5432/{YBDATABASE}" ) YB_DOC_DATABASE = "sample_data" YB_DOC_TABLE = "yellowbrick_documentation" embedding_table = "my_embeddings" os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) system_template = """If you don't know the answer, make up your best guess.""" messages = [ SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}")
langchain.prompts.chat.HumanMessagePromptTemplate.from_template
from langchain.output_parsers.enum import EnumOutputParser from enum import Enum class Colors(Enum): RED = "red" GREEN = "green" BLUE = "blue" parser =
EnumOutputParser(enum=Colors)
langchain.output_parsers.enum.EnumOutputParser
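A minimal, self-contained sketch of the enum parser above; parse() strips whitespace and maps the raw string onto the matching enum member, raising an OutputParserException for values outside the enum.
from enum import Enum
from langchain.output_parsers.enum import EnumOutputParser

class Colors(Enum):
    RED = "red"
    GREEN = "green"
    BLUE = "blue"

parser = EnumOutputParser(enum=Colors)
print(parser.get_format_instructions())  # tells the model which values are allowed
print(parser.parse("red"))       # -> Colors.RED
print(parser.parse(" green\n"))  # whitespace is stripped -> Colors.GREEN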
import pprint from langchain_community.utilities import SearxSearchWrapper search = SearxSearchWrapper(searx_host="http://127.0.0.1:8888") search.run("What is the capital of France") search = SearxSearchWrapper( searx_host="http://127.0.0.1:8888", k=5 ) # k is for max number of items search.run("large language model ", engines=["wiki"]) search =
SearxSearchWrapper(searx_host="http://127.0.0.1:8888", k=1)
langchain_community.utilities.SearxSearchWrapper
from langchain.chains import LLMChain from langchain.memory import ConversationBufferMemory from langchain.prompts import PromptTemplate from langchain_openai import OpenAI template = """You are a chatbot having a conversation with a human. {chat_history} Human: {human_input} Chatbot:""" prompt = PromptTemplate( input_variables=["chat_history", "human_input"], template=template ) memory = ConversationBufferMemory(memory_key="chat_history") llm = OpenAI() llm_chain = LLMChain( llm=llm, prompt=prompt, verbose=True, memory=memory, ) llm_chain.predict(human_input="Hi there my friend") llm_chain.predict(human_input="Not too bad - how are you?") from langchain.prompts import ( ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, ) from langchain_core.messages import SystemMessage from langchain_openai import ChatOpenAI prompt = ChatPromptTemplate.from_messages( [ SystemMessage( content="You are a chatbot having a conversation with a human." ), # The persistent system prompt MessagesPlaceholder( variable_name="chat_history" ), # Where the memory will be stored. HumanMessagePromptTemplate.from_template( "{human_input}" ), # Where the human input will be injected ] ) memory =
ConversationBufferMemory(memory_key="chat_history", return_messages=True)
langchain.memory.ConversationBufferMemory
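A plausible continuation of the snippet above, wiring the chat prompt and the returned-message memory into an LLMChain (prompt and memory as constructed there).
from langchain.chains import LLMChain
from langchain_openai import ChatOpenAI

# prompt and memory as defined in the snippet above
chat_llm_chain = LLMChain(
    llm=ChatOpenAI(),
    prompt=prompt,
    verbose=True,
    memory=memory,
)
chat_llm_chain.predict(human_input="Hi there my friend")
chat_llm_chain.predict(human_input="Not too bad - how are you?")  # prior turns are replayed via chat_history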
get_ipython().system(' pip install pdf2image') import arxiv from langchain_community.chat_models import ChatAnthropic from langchain_community.document_loaders import ArxivLoader, UnstructuredPDFLoader paper = next(arxiv.Search(query="Visual Instruction Tuning").results()) paper.download_pdf(filename="downloaded-paper.pdf") loader = UnstructuredPDFLoader("downloaded-paper.pdf") doc = loader.load()[0] from langchain_community.document_loaders import WebBaseLoader loader =
WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
langchain_community.document_loaders.WebBaseLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet youtube_search') from langchain.tools import YouTubeSearchTool tool =
YouTubeSearchTool()
langchain.tools.YouTubeSearchTool
"""For basic init and call""" import os from langchain_community.embeddings import VolcanoEmbeddings os.environ["VOLC_ACCESSKEY"] = "" os.environ["VOLC_SECRETKEY"] = "" embed =
VolcanoEmbeddings(volcano_ak="", volcano_sk="")
langchain_community.embeddings.VolcanoEmbeddings
import json from pprint import pprint from langchain.globals import set_debug from langchain_community.llms import NIBittensorLLM set_debug(True) llm_sys = NIBittensorLLM( system_prompt="Your task is to determine the response based on the user prompt. Explain me like I am technical lead of a project" ) sys_resp = llm_sys( "What is bittensor and what are the potential benefits of decentralized AI?" ) print(f"Response provided by LLM with system prompt set is : {sys_resp}") """ { "choices": [ {"index": Bittensor's Metagraph index number, "uid": Unique Identifier of a miner, "responder_hotkey": Hotkey of a miner, "message":{"role":"assistant","content": Contains actual response}, "response_ms": Time in milliseconds required to fetch response from a miner} ] } """ multi_response_llm = NIBittensorLLM(top_responses=10) multi_resp = multi_response_llm("What is Neural Network Feeding Mechanism?") json_multi_resp = json.loads(multi_resp) pprint(json_multi_resp) from langchain.chains import LLMChain from langchain.globals import set_debug from langchain.prompts import PromptTemplate from langchain_community.llms import NIBittensorLLM
set_debug(True)
langchain.globals.set_debug
import os import pprint os.environ["SERPER_API_KEY"] = "" from langchain_community.utilities import GoogleSerperAPIWrapper search = GoogleSerperAPIWrapper() search.run("Obama's first name?") os.environ["OPENAI_API_KEY"] = "" from langchain.agents import AgentType, Tool, initialize_agent from langchain_community.utilities import GoogleSerperAPIWrapper from langchain_openai import OpenAI llm = OpenAI(temperature=0) search = GoogleSerperAPIWrapper() tools = [ Tool( name="Intermediate Answer", func=search.run, description="useful for when you need to ask with search", ) ] self_ask_with_search = initialize_agent( tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True ) self_ask_with_search.run( "What is the hometown of the reigning men's U.S. Open champion?" ) search = GoogleSerperAPIWrapper() results = search.results("Apple Inc.") pprint.pp(results) search = GoogleSerperAPIWrapper(type="images") results = search.results("Lion") pprint.pp(results) search = GoogleSerperAPIWrapper(type="news") results = search.results("Tesla Inc.") pprint.pp(results) search =
GoogleSerperAPIWrapper(type="news", tbs="qdr:h")
langchain_community.utilities.GoogleSerperAPIWrapper
get_ipython().system(' nomic login') get_ipython().system(' nomic login token') get_ipython().system(' pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain') import os os.environ["LANGCHAIN_TRACING_V2"] = "true" os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com" os.environ["LANGCHAIN_API_KEY"] = "api_key" from langchain_community.document_loaders import WebBaseLoader urls = [ "https://lilianweng.github.io/posts/2023-06-23-agent/", "https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/", "https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/", ] docs = [WebBaseLoader(url).load() for url in urls] docs_list = [item for sublist in docs for item in sublist] from langchain_text_splitters import CharacterTextSplitter text_splitter = CharacterTextSplitter.from_tiktoken_encoder( chunk_size=7500, chunk_overlap=100 ) doc_splits = text_splitter.split_documents(docs_list) import tiktoken encoding = tiktoken.get_encoding("cl100k_base") encoding = tiktoken.encoding_for_model("gpt-3.5-turbo") for d in doc_splits: print("The document is %s tokens" % len(encoding.encode(d.page_content))) import os from langchain_community.vectorstores import Chroma from langchain_core.output_parsers import StrOutputParser from langchain_core.runnables import RunnableLambda, RunnablePassthrough from langchain_nomic import NomicEmbeddings from langchain_nomic.embeddings import NomicEmbeddings vectorstore = Chroma.from_documents( documents=doc_splits, collection_name="rag-chroma", embedding=NomicEmbeddings(model="nomic-embed-text-v1"), ) retriever = vectorstore.as_retriever() from langchain_community.chat_models import ChatOllama from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI template = """Answer the question based only on the following context: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template) model = ChatOpenAI(temperature=0, model="gpt-4-1106-preview") ollama_llm = "mistral:instruct" model_local =
ChatOllama(model=ollama_llm)
langchain_community.chat_models.ChatOllama
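For illustration, one way the pieces above are commonly composed into a local RAG chain (retriever, prompt, and model_local as defined there; the question is hypothetical).
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

# retriever, prompt, and model_local as defined in the snippet above
chain_local = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model_local
    | StrOutputParser()
)
print(chain_local.invoke("What are the types of agent memory?"))  # hypothetical question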
get_ipython().run_line_magic('pip', 'install --upgrade --quiet vearch') get_ipython().run_line_magic('pip', 'install --upgrade --quiet vearch_cluster') from langchain_community.document_loaders import TextLoader from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings from langchain_community.vectorstores.vearch import Vearch from langchain_text_splitters import RecursiveCharacterTextSplitter from transformers import AutoModel, AutoTokenizer model_path = "/data/zhx/zhx/langchain-ChatGLM_new/chatglm2-6b" tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda(0) query = "你好!" response, history = model.chat(tokenizer, query, history=[]) print(f"Human: {query}\nChatGLM:{response}\n") query = "你知道凌波微步吗,你知道都有谁学会了吗?" response, history = model.chat(tokenizer, query, history=history) print(f"Human: {query}\nChatGLM:{response}\n") file_path = "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/天龙八部/lingboweibu.txt" # Your local file path" loader = TextLoader(file_path, encoding="utf-8") documents = loader.load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100) texts = text_splitter.split_documents(documents) embedding_path = "/data/zhx/zhx/langchain-ChatGLM_new/text2vec/text2vec-large-chinese" embeddings =
HuggingFaceEmbeddings(model_name=embedding_path)
langchain_community.embeddings.huggingface.HuggingFaceEmbeddings
import os from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.llms import ForefrontAI from getpass import getpass FOREFRONTAI_API_KEY = getpass() os.environ["FOREFRONTAI_API_KEY"] = FOREFRONTAI_API_KEY llm =
ForefrontAI(endpoint_url="YOUR ENDPOINT URL HERE")
langchain_community.llms.ForefrontAI
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') from langchain.prompts import PromptTemplate from langchain_core.runnables import ConfigurableField from langchain_openai import ChatOpenAI model = ChatOpenAI(temperature=0).configurable_fields( temperature=ConfigurableField( id="llm_temperature", name="LLM Temperature", description="The temperature of the LLM", ) ) model.invoke("pick a random number") model.with_config(configurable={"llm_temperature": 0.9}).invoke("pick a random number") prompt = PromptTemplate.from_template("Pick a random number above {x}") chain = prompt | model chain.invoke({"x": 0}) chain.with_config(configurable={"llm_temperature": 0.9}).invoke({"x": 0}) from langchain.runnables.hub import HubRunnable prompt = HubRunnable("rlm/rag-prompt").configurable_fields( owner_repo_commit=ConfigurableField( id="hub_commit", name="Hub Commit", description="The Hub commit to pull from", ) ) prompt.invoke({"question": "foo", "context": "bar"}) prompt.with_config(configurable={"hub_commit": "rlm/rag-prompt-llama"}).invoke( {"question": "foo", "context": "bar"} ) from langchain.prompts import PromptTemplate from langchain_community.chat_models import ChatAnthropic from langchain_core.runnables import ConfigurableField from langchain_openai import ChatOpenAI llm =
ChatAnthropic(temperature=0)
langchain_community.chat_models.ChatAnthropic
from langchain_community.utilities import SerpAPIWrapper search = SerpAPIWrapper() search.run("Obama's first name?") params = { "engine": "bing", "gl": "us", "hl": "en", } search =
SerpAPIWrapper(params=params)
langchain_community.utilities.SerpAPIWrapper
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-api-python-client > /dev/null') get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-auth-oauthlib > /dev/null') get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-auth-httplib2 > /dev/null') get_ipython().run_line_magic('pip', 'install --upgrade --quiet beautifulsoup4 > /dev/null # This is optional but is useful for parsing HTML messages') from langchain_community.agent_toolkits import GmailToolkit toolkit = GmailToolkit() from langchain_community.tools.gmail.utils import ( build_resource_service, get_gmail_credentials, ) credentials = get_gmail_credentials( token_file="token.json", scopes=["https://mail.google.com/"], client_secrets_file="credentials.json", ) api_resource = build_resource_service(credentials=credentials) toolkit =
GmailToolkit(api_resource=api_resource)
langchain_community.agent_toolkits.GmailToolkit
import asyncio from langchain.callbacks import get_openai_callback from langchain_openai import OpenAI llm = OpenAI(temperature=0) with
get_openai_callback()
langchain.callbacks.get_openai_callback
get_ipython().run_line_magic('pip', 'install --upgrade --quiet nlpcloud') from langchain_community.embeddings import NLPCloudEmbeddings import os os.environ["NLPCLOUD_API_KEY"] = "xxx" nlpcloud_embd =
NLPCloudEmbeddings()
langchain_community.embeddings.NLPCloudEmbeddings
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic') import os import boto3 comprehend_client = boto3.client("comprehend", region_name="us-east-1") from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain comprehend_moderation = AmazonComprehendModerationChain( client=comprehend_client, verbose=True, # optional ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( ModerationPiiError, ) template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comprehend_moderation | {"input": (lambda x: x["output"]) | llm} | comprehend_moderation ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?" } ) except ModerationPiiError as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import ( BaseModerationConfig, ModerationPiiConfig, ModerationPromptSafetyConfig, ModerationToxicityConfig, ) pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5) moderation_config = BaseModerationConfig( filters=[pii_config, toxicity_config, prompt_safety_config] ) comp_moderation_with_config = AmazonComprehendModerationChain( moderation_config=moderation_config, # specify the configuration client=comprehend_client, # optionally pass the Boto3 Client verbose=True, ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comp_moderation_with_config | {"input": (lambda x: x["output"]) | llm} | comp_moderation_with_config ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?" 
} ) except Exception as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler class MyModCallback(BaseModerationCallbackHandler): async def on_after_pii(self, output_beacon, unique_id): import json moderation_type = output_beacon["moderation_type"] chain_id = output_beacon["moderation_chain_id"] with open(f"output-{moderation_type}-{chain_id}.json", "w") as file: data = {"beacon_data": output_beacon, "unique_id": unique_id} json.dump(data, file) """ async def on_after_toxicity(self, output_beacon, unique_id): pass async def on_after_prompt_safety(self, output_beacon, unique_id): pass """ my_callback = MyModCallback() pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config]) comp_moderation_with_config = AmazonComprehendModerationChain( moderation_config=moderation_config, # specify the configuration client=comprehend_client, # optionally pass the Boto3 Client unique_id="[email protected]", # A unique ID moderation_callback=my_callback, # BaseModerationCallbackHandler verbose=True, ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comp_moderation_with_config | {"input": (lambda x: x["output"]) | llm} | comp_moderation_with_config ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-456-7890. Can you give me some more samples?" } ) except Exception as e: print(str(e)) else: print(response["output"]) get_ipython().run_line_magic('pip', 'install --upgrade --quiet huggingface_hub') import os os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<YOUR HF TOKEN HERE>" repo_id = "google/flan-t5-xxl" from langchain.prompts import PromptTemplate from langchain_community.llms import HuggingFaceHub template = """{question}""" prompt = PromptTemplate.from_template(template) llm = HuggingFaceHub( repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 256} ) pii_config = ModerationPiiConfig( labels=["SSN", "CREDIT_DEBIT_NUMBER"], redact=True, mask_character="X" ) toxicity_config = ModerationToxicityConfig(threshold=0.5) prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.8) moderation_config_1 = BaseModerationConfig( filters=[pii_config, toxicity_config, prompt_safety_config] ) moderation_config_2 =
BaseModerationConfig(filters=[pii_config])
langchain_experimental.comprehend_moderation.BaseModerationConfig
get_ipython().run_line_magic('pip', 'install --upgrade --quiet bilibili-api-python') from langchain_community.document_loaders import BiliBiliLoader loader =
BiliBiliLoader(["https://www.bilibili.com/video/BV1xt411o7Xu/"])
langchain_community.document_loaders.BiliBiliLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sagemaker') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai') get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results') import os os.environ["OPENAI_API_KEY"] = "<ADD-KEY-HERE>" os.environ["SERPAPI_API_KEY"] = "<ADD-KEY-HERE>" from langchain.agents import initialize_agent, load_tools from langchain.callbacks import SageMakerCallbackHandler from langchain.chains import LLMChain, SimpleSequentialChain from langchain.prompts import PromptTemplate from langchain_openai import OpenAI from sagemaker.analytics import ExperimentAnalytics from sagemaker.experiments.run import Run from sagemaker.session import Session HPARAMS = { "temperature": 0.1, "model_name": "gpt-3.5-turbo-instruct", } BUCKET_NAME = None EXPERIMENT_NAME = "langchain-sagemaker-tracker" session = Session(default_bucket=BUCKET_NAME) RUN_NAME = "run-scenario-1" PROMPT_TEMPLATE = "tell me a joke about {topic}" INPUT_VARIABLES = {"topic": "fish"} with Run( experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session ) as run: sagemaker_callback = SageMakerCallbackHandler(run) llm = OpenAI(callbacks=[sagemaker_callback], **HPARAMS) prompt = PromptTemplate.from_template(template=PROMPT_TEMPLATE) chain = LLMChain(llm=llm, prompt=prompt, callbacks=[sagemaker_callback]) chain.run(**INPUT_VARIABLES) sagemaker_callback.flush_tracker() RUN_NAME = "run-scenario-2" PROMPT_TEMPLATE_1 = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title. Title: {title} Playwright: This is a synopsis for the above play:""" PROMPT_TEMPLATE_2 = """You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play. Play Synopsis: {synopsis} Review from a New York Times play critic of the above play:""" INPUT_VARIABLES = { "input": "documentary about good video games that push the boundary of game design" } with Run( experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session ) as run: sagemaker_callback =
SageMakerCallbackHandler(run)
langchain.callbacks.SageMakerCallbackHandler
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') import os import uuid uid = uuid.uuid4().hex[:6] project_name = f"Run Fine-tuning Walkthrough {uid}" os.environ["LANGCHAIN_TRACING_V2"] = "true" os.environ["LANGCHAIN_API_KEY"] = "YOUR API KEY" os.environ["LANGCHAIN_PROJECT"] = project_name from enum import Enum from langchain_core.pydantic_v1 import BaseModel, Field class Operation(Enum): add = "+" subtract = "-" multiply = "*" divide = "/" class Calculator(BaseModel): """A calculator function""" num1: float num2: float operation: Operation = Field(..., description="+,-,*,/") def calculate(self): if self.operation == Operation.add: return self.num1 + self.num2 elif self.operation == Operation.subtract: return self.num1 - self.num2 elif self.operation == Operation.multiply: return self.num1 * self.num2 elif self.operation == Operation.divide: if self.num2 != 0: return self.num1 / self.num2 else: return "Cannot divide by zero" from pprint import pprint from langchain.utils.openai_functions import convert_pydantic_to_openai_function from langchain_core.pydantic_v1 import BaseModel openai_function_def = convert_pydantic_to_openai_function(Calculator) pprint(openai_function_def) from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI prompt = ChatPromptTemplate.from_messages( [ ("system", "You are an accounting assistant."), ("user", "{input}"), ] ) chain = ( prompt | ChatOpenAI().bind(functions=[openai_function_def]) |
PydanticOutputFunctionsParser(pydantic_schema=Calculator)
langchain.output_parsers.openai_functions.PydanticOutputFunctionsParser
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints') import getpass import os if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): nvapi_key = getpass.getpass("Enter your NVIDIA API key: ") assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key" os.environ["NVIDIA_API_KEY"] = nvapi_key from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="mixtral_8x7b") result = llm.invoke("Write a ballad about LangChain.") print(result.content) print(llm.batch(["What's 2*3?", "What's 2*6?"])) for chunk in llm.stream("How far can a seagull fly in one day?"): print(chunk.content, end="|") async for chunk in llm.astream( "How long does it take for monarch butterflies to migrate?" ): print(chunk.content, end="|") ChatNVIDIA.get_available_models() from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")] ) chain = prompt | ChatNVIDIA(model="llama2_13b") |
StrOutputParser()
langchain_core.output_parsers.StrOutputParser
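A short sketch of driving the composed chain above; with StrOutputParser at the end, invoke returns a plain string and stream yields string chunks (the inputs are hypothetical).
# chain as composed in the snippet above
print(chain.invoke({"input": "What's your name?"}))
for chunk in chain.stream({"input": "Tell me a short joke"}):
    print(chunk, end="")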
from langchain.globals import set_llm_cache from langchain_openai import ChatOpenAI llm = ChatOpenAI() get_ipython().run_cell_magic('time', '', 'from langchain.cache import InMemoryCache\n\nset_llm_cache(InMemoryCache())\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict("Tell me a joke")\n') get_ipython().run_cell_magic('time', '', '# The second time it is, so it goes faster\nllm.predict("Tell me a joke")\n') get_ipython().system('rm .langchain.db') from langchain.cache import SQLiteCache set_llm_cache(
SQLiteCache(database_path=".langchain.db")
langchain.cache.SQLiteCache
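A compact sketch of the SQLite cache above: once set globally, a repeated identical prompt is answered from .langchain.db instead of triggering a new API call.
from langchain.cache import SQLiteCache
from langchain.globals import set_llm_cache
from langchain_openai import ChatOpenAI

set_llm_cache(SQLiteCache(database_path=".langchain.db"))
llm = ChatOpenAI()
llm.predict("Tell me a joke")  # first call: hits the API and writes to the cache
llm.predict("Tell me a joke")  # identical call: served from the SQLite cache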
get_ipython().system('pip install boto3') from langchain_experimental.recommenders import AmazonPersonalize recommender_arn = "<insert_arn>" client = AmazonPersonalize( credentials_profile_name="default", region_name="us-west-2", recommender_arn=recommender_arn, ) client.get_recommendations(user_id="1") from langchain.llms.bedrock import Bedrock from langchain_experimental.recommenders import AmazonPersonalizeChain bedrock_llm = Bedrock(model_id="anthropic.claude-v2", region_name="us-west-2") chain = AmazonPersonalizeChain.from_llm( llm=bedrock_llm, client=client, return_direct=False ) response = chain({"user_id": "1"}) print(response) from langchain.prompts.prompt import PromptTemplate RANDOM_PROMPT_QUERY = """ You are a skilled publicist. Write a high-converting marketing email advertising several movies available in a video-on-demand streaming platform next week, given the movie and user information below. Your email will leverage the power of storytelling and persuasive language. The movies to recommend and their information is contained in the <movie> tag. All movies in the <movie> tag must be recommended. Give a summary of the movies and why the human should watch them. Put the email between <email> tags. <movie> {result} </movie> Assistant: """ RANDOM_PROMPT =
PromptTemplate(input_variables=["result"], template=RANDOM_PROMPT_QUERY)
langchain.prompts.prompt.PromptTemplate
from langchain_community.document_transformers.openai_functions import ( create_metadata_tagger, ) from langchain_core.documents import Document from langchain_openai import ChatOpenAI schema = { "properties": { "movie_title": {"type": "string"}, "critic": {"type": "string"}, "tone": {"type": "string", "enum": ["positive", "negative"]}, "rating": { "type": "integer", "description": "The number of stars the critic rated the movie", }, }, "required": ["movie_title", "critic", "tone"], } llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613") document_transformer = create_metadata_tagger(metadata_schema=schema, llm=llm) original_documents = [ Document( page_content="Review of The Bee Movie\nBy Roger Ebert\n\nThis is the greatest movie ever made. 4 out of 5 stars." ), Document( page_content="Review of The Godfather\nBy Anonymous\n\nThis movie was super boring. 1 out of 5 stars.", metadata={"reliable": False}, ), ] enhanced_documents = document_transformer.transform_documents(original_documents) import json print( *[d.page_content + "\n\n" + json.dumps(d.metadata) for d in enhanced_documents], sep="\n\n---------------\n\n", ) from typing import Literal from pydantic import BaseModel, Field class Properties(BaseModel): movie_title: str critic: str tone: Literal["positive", "negative"] rating: int = Field(description="Rating out of 5 stars") document_transformer = create_metadata_tagger(Properties, llm) enhanced_documents = document_transformer.transform_documents(original_documents) print( *[d.page_content + "\n\n" + json.dumps(d.metadata) for d in enhanced_documents], sep="\n\n---------------\n\n", ) from langchain_core.prompts import ChatPromptTemplate prompt = ChatPromptTemplate.from_template( """Extract relevant information from the following text. Anonymous critics are actually Roger Ebert. {input} """ ) document_transformer =
create_metadata_tagger(schema, llm, prompt=prompt)
langchain_community.document_transformers.openai_functions.create_metadata_tagger
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azure-ai-formrecognizer > /dev/null') get_ipython().run_line_magic('pip', 'install --upgrade --quiet azure-cognitiveservices-speech > /dev/null') get_ipython().run_line_magic('pip', 'install --upgrade --quiet azure-ai-textanalytics > /dev/null') get_ipython().run_line_magic('pip', 'install --upgrade --quiet azure-ai-vision > /dev/null') import os os.environ["OPENAI_API_KEY"] = "sk-" os.environ["AZURE_COGS_KEY"] = "" os.environ["AZURE_COGS_ENDPOINT"] = "" os.environ["AZURE_COGS_REGION"] = "" from langchain_community.agent_toolkits import AzureCognitiveServicesToolkit toolkit =
AzureCognitiveServicesToolkit()
langchain_community.agent_toolkits.AzureCognitiveServicesToolkit
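A sketch of what typically follows the completion, assuming the AZURE_COGS_* variables above hold real credentials; get_tools() is the standard toolkit accessor:

toolkit = AzureCognitiveServicesToolkit()
# Inspect which Azure tools (form recognition, speech synthesis, image analysis, ...) are available.
for tool in toolkit.get_tools():
    print(tool.name)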
get_ipython().run_line_magic('pip', "install --upgrade --quiet langchain-openai 'deeplake[enterprise]' tiktoken") from langchain_community.vectorstores import DeepLake from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") activeloop_token = getpass.getpass("activeloop token:") embeddings = OpenAIEmbeddings() from langchain_community.document_loaders import TextLoader loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() db =
DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, overwrite=True)
langchain_community.vectorstores.DeepLake
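With the store created, the usual next steps are indexing and querying; a sketch using the docs and embeddings from the prompt above (the query string is illustrative, not part of this row):

db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, overwrite=True)
db.add_documents(docs)
# Retrieve the chunks most similar to an example question.
results = db.similarity_search("What did the president say about Ketanji Brown Jackson?")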
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') from langchain.evaluation import load_evaluator eval_chain = load_evaluator("pairwise_string") from langchain.evaluation.loading import load_dataset dataset =
load_dataset("langchain-howto-queries")
langchain.evaluation.loading.load_dataset
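Completing the row loads a public evaluation dataset; a minimal sketch (the dataset name comes from the completion itself):

from langchain.evaluation.loading import load_dataset

# Returns a list of example records to feed the pairwise-string evaluator above.
dataset = load_dataset("langchain-howto-queries")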
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') import os import uuid uid = uuid.uuid4().hex[:6] project_name = f"Run Fine-tuning Walkthrough {uid}" os.environ["LANGCHAIN_TRACING_V2"] = "true" os.environ["LANGCHAIN_API_KEY"] = "YOUR API KEY" os.environ["LANGCHAIN_PROJECT"] = project_name from enum import Enum from langchain_core.pydantic_v1 import BaseModel, Field class Operation(Enum): add = "+" subtract = "-" multiply = "*" divide = "/" class Calculator(BaseModel): """A calculator function""" num1: float num2: float operation: Operation = Field(..., description="+,-,*,/") def calculate(self): if self.operation == Operation.add: return self.num1 + self.num2 elif self.operation == Operation.subtract: return self.num1 - self.num2 elif self.operation == Operation.multiply: return self.num1 * self.num2 elif self.operation == Operation.divide: if self.num2 != 0: return self.num1 / self.num2 else: return "Cannot divide by zero" from pprint import pprint from langchain.utils.openai_functions import convert_pydantic_to_openai_function from langchain_core.pydantic_v1 import BaseModel openai_function_def = convert_pydantic_to_openai_function(Calculator) pprint(openai_function_def) from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI prompt = ChatPromptTemplate.from_messages( [ ("system", "You are an accounting assistant."), ("user", "{input}"), ] ) chain = ( prompt | ChatOpenAI().bind(functions=[openai_function_def]) | PydanticOutputFunctionsParser(pydantic_schema=Calculator) | (lambda x: x.calculate()) ) math_questions = [ "What's 45/9?", "What's 81/9?", "What's 72/8?", "What's 56/7?", "What's 36/6?", "What's 64/8?", "What's 12*6?", "What's 8*8?", "What's 10*10?", "What's 11*11?", "What's 13*13?", "What's 45+30?", "What's 72+28?", "What's 56+44?", "What's 63+37?", "What's 70-35?", "What's 60-30?", "What's 50-25?", "What's 40-20?", "What's 30-15?", ] results = chain.batch([{"input": q} for q in math_questions], return_exceptions=True) from langsmith.client import Client client = Client() successful_traces = { run.trace_id for run in client.list_runs( project_name=project_name, execution_order=1, error=False, ) } llm_runs = [ run for run in client.list_runs( project_name=project_name, run_type="llm", ) if run.trace_id in successful_traces ] from langchain_community.chat_loaders.langsmith import LangSmithRunChatLoader loader =
LangSmithRunChatLoader(runs=llm_runs)
langchain_community.chat_loaders.langsmith.LangSmithRunChatLoader
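The loader wraps the filtered LangSmith runs as chat sessions; a sketch assuming llm_runs is the list built in the prompt above:

loader = LangSmithRunChatLoader(runs=llm_runs)
# lazy_load yields ChatSession objects suitable for preparing fine-tuning data.
chat_sessions = list(loader.lazy_load())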
get_ipython().run_line_magic('pip', 'install --upgrade --quiet duckdb') from langchain_community.document_loaders import DuckDBLoader get_ipython().run_cell_magic('file', 'example.csv', 'Team,Payroll\nNationals,81.34\nReds,82.20\n') loader = DuckDBLoader("SELECT * FROM read_csv_auto('example.csv')") data = loader.load() print(data) loader = DuckDBLoader( "SELECT * FROM read_csv_auto('example.csv')", page_content_columns=["Team"], metadata_columns=["Payroll"], ) data = loader.load() print(data) loader =
DuckDBLoader( "SELECT Team, Payroll, Team As source FROM read_csv_auto('example.csv')
langchain_community.document_loaders.DuckDBLoader
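The completion is cut mid-string by the dataset's length limit; below is one plausible closing of the call, where the metadata_columns kwarg (already used earlier in this row) is an assumption about how the snippet ends:

loader = DuckDBLoader(
    "SELECT Team, Payroll, Team As source FROM read_csv_auto('example.csv')",
    metadata_columns=["source"],  # assumed: expose the aliased column as metadata
)
data = loader.load()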
get_ipython().run_line_magic('pip', 'install --upgrade --quiet opencv-python scikit-image') import os from langchain_openai import OpenAI os.environ["OPENAI_API_KEY"] = "<your-key-here>" from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper from langchain_openai import OpenAI llm = OpenAI(temperature=0.9) prompt = PromptTemplate( input_variables=["image_desc"], template="Generate a detailed prompt to generate an image based on the following description: {image_desc}", ) chain = LLMChain(llm=llm, prompt=prompt) image_url =
DallEAPIWrapper()
langchain_community.utilities.dalle_image_generator.DallEAPIWrapper
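Chained together, the pieces generate a detailed prompt and hand it to DALL-E; a sketch assuming chain is the LLMChain defined above (the image description is illustrative only):

# run() submits the generated prompt to the DALL-E API and returns the image URL.
image_url = DallEAPIWrapper().run(chain.run("halloween night at a haunted museum"))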
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain_openai') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("Input your OpenAI API key:") tidb_connection_string_template = "mysql+pymysql://<USER>:<PASSWORD>@<HOST>:4000/<DB>?ssl_ca=/etc/ssl/cert.pem&ssl_verify_cert=true&ssl_verify_identity=true" tidb_password = getpass.getpass("Input your TiDB password:") tidb_connection_string = tidb_connection_string_template.replace( "<PASSWORD>", tidb_password ) from datetime import datetime from langchain_community.chat_message_histories import TiDBChatMessageHistory history = TiDBChatMessageHistory( connection_string=tidb_connection_string, session_id="code_gen", earliest_time=datetime.utcnow(), # Optional to set earliest_time to load messages after this time point. ) history.add_user_message("How's our feature going?") history.add_ai_message( "It's going well. We are working on testing now. It will be released in Feb." ) history.messages from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_openai import ChatOpenAI prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're an assistant who's good at coding. You're helping a startup build", ),
MessagesPlaceholder(variable_name="history")
langchain_core.prompts.MessagesPlaceholder
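The placeholder slots the stored TiDB history into the chat prompt; a sketch in which the abbreviated system string and the trailing human turn are assumptions about how the truncated message list closes:

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You're an assistant who's good at coding."),  # abbreviated from the row
        MessagesPlaceholder(variable_name="history"),
        ("human", "{question}"),  # assumed final turn
    ]
)
chain = prompt | ChatOpenAI()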
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic') import os import boto3 comprehend_client = boto3.client("comprehend", region_name="us-east-1") from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain comprehend_moderation = AmazonComprehendModerationChain( client=comprehend_client, verbose=True, # optional ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( ModerationPiiError, ) template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comprehend_moderation | {"input": (lambda x: x["output"]) | llm} | comprehend_moderation ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?" } ) except ModerationPiiError as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import ( BaseModerationConfig, ModerationPiiConfig, ModerationPromptSafetyConfig, ModerationToxicityConfig, ) pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5) moderation_config = BaseModerationConfig( filters=[pii_config, toxicity_config, prompt_safety_config] ) comp_moderation_with_config = AmazonComprehendModerationChain( moderation_config=moderation_config, # specify the configuration client=comprehend_client, # optionally pass the Boto3 Client verbose=True, ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comp_moderation_with_config | {"input": (lambda x: x["output"]) | llm} | comp_moderation_with_config ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?" 
} ) except Exception as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler class MyModCallback(BaseModerationCallbackHandler): async def on_after_pii(self, output_beacon, unique_id): import json moderation_type = output_beacon["moderation_type"] chain_id = output_beacon["moderation_chain_id"] with open(f"output-{moderation_type}-{chain_id}.json", "w") as file: data = {"beacon_data": output_beacon, "unique_id": unique_id} json.dump(data, file) """ async def on_after_toxicity(self, output_beacon, unique_id): pass async def on_after_prompt_safety(self, output_beacon, unique_id): pass """ my_callback = MyModCallback() pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config]) comp_moderation_with_config = AmazonComprehendModerationChain( moderation_config=moderation_config, # specify the configuration client=comprehend_client, # optionally pass the Boto3 Client unique_id="[email protected]", # A unique ID moderation_callback=my_callback, # BaseModerationCallbackHandler verbose=True, ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comp_moderation_with_config | {"input": (lambda x: x["output"]) | llm} | comp_moderation_with_config ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-456-7890. Can you give me some more samples?" 
} ) except Exception as e: print(str(e)) else: print(response["output"]) get_ipython().run_line_magic('pip', 'install --upgrade --quiet huggingface_hub') import os os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<YOUR HF TOKEN HERE>" repo_id = "google/flan-t5-xxl" from langchain.prompts import PromptTemplate from langchain_community.llms import HuggingFaceHub template = """{question}""" prompt = PromptTemplate.from_template(template) llm = HuggingFaceHub( repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 256} ) pii_config = ModerationPiiConfig( labels=["SSN", "CREDIT_DEBIT_NUMBER"], redact=True, mask_character="X" ) toxicity_config = ModerationToxicityConfig(threshold=0.5) prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.8) moderation_config_1 = BaseModerationConfig( filters=[pii_config, toxicity_config, prompt_safety_config] ) moderation_config_2 = BaseModerationConfig(filters=[pii_config]) amazon_comp_moderation = AmazonComprehendModerationChain( moderation_config=moderation_config_1, client=comprehend_client, moderation_callback=my_callback, verbose=True, ) amazon_comp_moderation_out = AmazonComprehendModerationChain( moderation_config=moderation_config_2, client=comprehend_client, verbose=True ) chain = ( prompt | amazon_comp_moderation | {"input": (lambda x: x["output"]) | llm} | amazon_comp_moderation_out ) try: response = chain.invoke( { "question": """What is John Doe's address, phone number and SSN from the following text? John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at [email protected] reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place. """ } ) except Exception as e: print(str(e)) else: print(response["output"]) endpoint_name = "<SAGEMAKER_ENDPOINT_NAME>" # replace with your SageMaker Endpoint name region = "<REGION>" # replace with your SageMaker Endpoint region import json from langchain.prompts import PromptTemplate from langchain_community.llms import SagemakerEndpoint from langchain_community.llms.sagemaker_endpoint import LLMContentHandler class ContentHandler(LLMContentHandler): content_type = "application/json" accepts = "application/json" def transform_input(self, prompt: str, model_kwargs: dict) -> bytes: input_str = json.dumps({"text_inputs": prompt, **model_kwargs}) return input_str.encode("utf-8") def transform_output(self, output: bytes) -> str: response_json = json.loads(output.read().decode("utf-8")) return response_json["generated_texts"][0] content_handler = ContentHandler() template = """From the following 'Document', precisely answer the 'Question'. Do not add any spurious information in your answer. Document: John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at [email protected] reminding him of an old acquaintance's reunion. 
As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place. Question: {question} Answer: """ llm_prompt = PromptTemplate.from_template(template) llm = SagemakerEndpoint( endpoint_name=endpoint_name, region_name=region, model_kwargs={ "temperature": 0.95, "max_length": 200, "num_return_sequences": 3, "top_k": 50, "top_p": 0.95, "do_sample": True, }, content_handler=content_handler, ) pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config =
ModerationToxicityConfig(threshold=0.5)
langchain_experimental.comprehend_moderation.ModerationToxicityConfig
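The completed assignment plugs into a fresh moderation config; a sketch combining it with the pii_config defined just before, mirroring the pattern used earlier in this row:

toxicity_config = ModerationToxicityConfig(threshold=0.5)
moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config])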
with open("../docs/docs/modules/state_of_the_union.txt") as f: state_of_the_union = f.read() from langchain.chains import AnalyzeDocumentChain from langchain_openai import ChatOpenAI llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) from langchain.chains.question_answering import load_qa_chain qa_chain =
load_qa_chain(llm, chain_type="map_reduce")
langchain.chains.question_answering.load_qa_chain
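The map-reduce QA chain is normally wrapped in the AnalyzeDocumentChain imported above so it can run over one long document; a sketch using the state_of_the_union text already loaded (the question is illustrative):

qa_chain = load_qa_chain(llm, chain_type="map_reduce")
qa_document_chain = AnalyzeDocumentChain(combine_docs_chain=qa_chain)
# Splits the document, answers per chunk, then reduces to a single answer.
answer = qa_document_chain.run(
    input_document=state_of_the_union,
    question="What did the president say about Justice Breyer?",
)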
get_ipython().run_cell_magic('writefile', 'wechat_chats.txt', '女朋友 2023/09/16 2:51 PM\n天气有点凉\n\n男朋友 2023/09/16 2:51 PM\n珍簟凉风著,瑶琴寄恨生。嵇君懒书札,底物慰秋情。\n\n女朋友 2023/09/16 3:06 PM\n忙什么呢\n\n男朋友 2023/09/16 3:06 PM\n今天只干成了一件像样的事\n那就是想你\n\n女朋友 2023/09/16 3:06 PM\n[动画表情]\n') import logging import re from typing import Iterator, List from langchain_community.chat_loaders import base as chat_loaders from langchain_core.messages import BaseMessage, HumanMessage logger = logging.getLogger() class WeChatChatLoader(chat_loaders.BaseChatLoader): def __init__(self, path: str): """ Initialize the WeChat chat loader. Args: path: Path to the exported WeChat chat text file. """ self.path = path self._message_line_regex = re.compile( r"(?P<sender>.+?) (?P<timestamp>\d{4}/\d{2}/\d{2} \d{1,2}:\d{2} (?:AM|PM))", # noqa ) def _append_message_to_results( self, results: List, current_sender: str, current_timestamp: str, current_content: List[str], ): content = "\n".join(current_content).strip() if not re.match(r"\[.*\]", content): results.append( HumanMessage( content=content, additional_kwargs={ "sender": current_sender, "events": [{"message_time": current_timestamp}], }, ) ) return results def _load_single_chat_session_from_txt( self, file_path: str ) -> chat_loaders.ChatSession: """ Load a single chat session from a text file. Args: file_path: Path to the text file containing the chat messages. Returns: A `ChatSession` object containing the loaded chat messages. """ with open(file_path, "r", encoding="utf-8") as file: lines = file.readlines() results: List[BaseMessage] = [] current_sender = None current_timestamp = None current_content = [] for line in lines: if re.match(self._message_line_regex, line): if current_sender and current_content: results = self._append_message_to_results( results, current_sender, current_timestamp, current_content ) current_sender, current_timestamp = re.match( self._message_line_regex, line ).groups() current_content = [] else: current_content.append(line.strip()) if current_sender and current_content: results = self._append_message_to_results( results, current_sender, current_timestamp, current_content ) return chat_loaders.ChatSession(messages=results) def lazy_load(self) -> Iterator[chat_loaders.ChatSession]: """ Lazy load the messages from the chat file and yield them in the required format. Yields: A `ChatSession` object containing the loaded chat messages. """ yield self._load_single_chat_session_from_txt(self.path) loader = WeChatChatLoader( path="./wechat_chats.txt", ) from typing import List from langchain_community.chat_loaders.base import ChatSession from langchain_community.chat_loaders.utils import ( map_ai_messages, merge_chat_runs, ) raw_messages = loader.lazy_load() merged_messages = merge_chat_runs(raw_messages) messages: List[ChatSession] = list(
map_ai_messages(merged_messages, sender="男朋友")
langchain_community.chat_loaders.utils.map_ai_messages
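Finishing the row relabels one sender's turns as AI messages; a minimal sketch:

# Messages from 男朋友 become AIMessages; the other side stays human.
messages: List[ChatSession] = list(map_ai_messages(merged_messages, sender="男朋友"))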
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pygithub') import os from langchain.agents import AgentType, initialize_agent from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit from langchain_community.utilities.github import GitHubAPIWrapper from langchain_openai import ChatOpenAI os.environ["GITHUB_APP_ID"] = "123456" os.environ["GITHUB_APP_PRIVATE_KEY"] = "path/to/your/private-key.pem" os.environ["GITHUB_REPOSITORY"] = "username/repo-name" os.environ["GITHUB_BRANCH"] = "bot-branch-name" os.environ["GITHUB_BASE_BRANCH"] = "main" os.environ["OPENAI_API_KEY"] = "" llm = ChatOpenAI(temperature=0, model="gpt-4-1106-preview") github = GitHubAPIWrapper() toolkit = GitHubToolkit.from_github_api_wrapper(github) tools = toolkit.get_tools() agent = initialize_agent( tools, llm, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True, ) print("Available tools:") for tool in tools: print("\t" + tool.name) agent.run( "You have the software engineering capabilities of a Google Principal engineer. You are tasked with completing issues on a github repository. Please look at the existing issues and complete them." ) from langchain import hub gh_issue_prompt_template = hub.pull("kastanday/new-github-issue") print(gh_issue_prompt_template.template) def format_issue(issue): title = f"Title: {issue.get('title')}." opened_by = f"Opened by user: {issue.get('opened_by')}" body = f"Body: {issue.get('body')}" comments = issue.get("comments") # often too long return "\n".join([title, opened_by, body]) issue = github.get_issue(33) # task to implement an RNA-seq pipeline (bioinformatics) final_gh_issue_prompt = gh_issue_prompt_template.format( issue_description=format_issue(issue) ) print(final_gh_issue_prompt) from langchain.memory.summary_buffer import ConversationSummaryBufferMemory from langchain_core.prompts.chat import MessagesPlaceholder summarizer_llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo") # type: ignore chat_history =
MessagesPlaceholder(variable_name="chat_history")
langchain_core.prompts.chat.MessagesPlaceholder
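The placeholder is typically paired with a summary-buffer memory keyed to the same variable; a sketch where the token limit is an assumed value:

chat_history = MessagesPlaceholder(variable_name="chat_history")
memory = ConversationSummaryBufferMemory(
    memory_key="chat_history",
    return_messages=True,
    llm=summarizer_llm,
    max_token_limit=2000,  # assumed budget before older turns get summarized
)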
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)') get_ipython().system(' pip install "unstructured[all-docs]" pillow pydantic lxml pillow matplotlib chromadb tiktoken') from langchain_text_splitters import CharacterTextSplitter from unstructured.partition.pdf import partition_pdf def extract_pdf_elements(path, fname): """ Extract images, tables, and chunk text from a PDF file. path: File path, which is used to dump images (.jpg) fname: File name """ return partition_pdf( filename=path + fname, extract_images_in_pdf=False, infer_table_structure=True, chunking_strategy="by_title", max_characters=4000, new_after_n_chars=3800, combine_text_under_n_chars=2000, image_output_dir_path=path, ) def categorize_elements(raw_pdf_elements): """ Categorize extracted elements from a PDF into tables and texts. raw_pdf_elements: List of unstructured.documents.elements """ tables = [] texts = [] for element in raw_pdf_elements: if "unstructured.documents.elements.Table" in str(type(element)): tables.append(str(element)) elif "unstructured.documents.elements.CompositeElement" in str(type(element)): texts.append(str(element)) return texts, tables fpath = "/Users/rlm/Desktop/cj/" fname = "cj.pdf" raw_pdf_elements = extract_pdf_elements(fpath, fname) texts, tables = categorize_elements(raw_pdf_elements) text_splitter = CharacterTextSplitter.from_tiktoken_encoder( chunk_size=4000, chunk_overlap=0 ) joined_texts = " ".join(texts) texts_4k_token = text_splitter.split_text(joined_texts) from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI def generate_text_summaries(texts, tables, summarize_texts=False): """ Summarize text elements texts: List of str tables: List of str summarize_texts: Bool to summarize texts """ prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \ These summaries will be embedded and used to retrieve the raw text or table elements. \ Give a concise summary of the table or text that is well optimized for retrieval. 
Table or text: {element} """ prompt = ChatPromptTemplate.from_template(prompt_text) model = ChatOpenAI(temperature=0, model="gpt-4") summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser() text_summaries = [] table_summaries = [] if texts and summarize_texts: text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5}) elif texts: text_summaries = texts if tables: table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5}) return text_summaries, table_summaries text_summaries, table_summaries = generate_text_summaries( texts_4k_token, tables, summarize_texts=True ) import base64 import os from langchain_core.messages import HumanMessage def encode_image(image_path): """Getting the base64 string""" with open(image_path, "rb") as image_file: return base64.b64encode(image_file.read()).decode("utf-8") def image_summarize(img_base64, prompt): """Make image summary""" chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024) msg = chat.invoke( [ HumanMessage( content=[ {"type": "text", "text": prompt}, { "type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"}, }, ] ) ] ) return msg.content def generate_img_summaries(path): """ Generate summaries and base64 encoded strings for images path: Path to list of .jpg files extracted by Unstructured """ img_base64_list = [] image_summaries = [] prompt = """You are an assistant tasked with summarizing images for retrieval. \ These summaries will be embedded and used to retrieve the raw image. \ Give a concise summary of the image that is well optimized for retrieval.""" for img_file in sorted(os.listdir(path)): if img_file.endswith(".jpg"): img_path = os.path.join(path, img_file) base64_image = encode_image(img_path) img_base64_list.append(base64_image) image_summaries.append(image_summarize(base64_image, prompt)) return img_base64_list, image_summaries img_base64_list, image_summaries = generate_img_summaries(fpath) import uuid from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import InMemoryStore from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_openai import OpenAIEmbeddings def create_multi_vector_retriever( vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images ): """ Create retriever that indexes summaries, but returns raw images or texts """ store =
InMemoryStore()
langchain.storage.InMemoryStore
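Inside create_multi_vector_retriever, the in-memory docstore pairs with the vectorstore through a shared id key; a sketch of the lines that plausibly follow the completion (the id_key name is an assumption):

store = InMemoryStore()
id_key = "doc_id"  # assumed metadata key linking summaries to their raw documents
# Summaries are embedded in the vectorstore; raw texts/tables/images live in the docstore.
retriever = MultiVectorRetriever(vectorstore=vectorstore, docstore=store, id_key=id_key)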
from langchain_community.document_loaders import AZLyricsLoader loader =
AZLyricsLoader("https://www.azlyrics.com/lyrics/mileycyrus/flowers.html")
langchain_community.document_loaders.AZLyricsLoader
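The completed loader fetches and parses the lyrics page; a minimal sketch:

loader = AZLyricsLoader("https://www.azlyrics.com/lyrics/mileycyrus/flowers.html")
data = loader.load()  # returns Documents containing the scraped lyrics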
get_ipython().run_line_magic('pip', 'install --upgrade --quiet promptlayer') import os import promptlayer from langchain_community.llms import PromptLayerOpenAI from getpass import getpass PROMPTLAYER_API_KEY = getpass() os.environ["PROMPTLAYER_API_KEY"] = PROMPTLAYER_API_KEY from getpass import getpass OPENAI_API_KEY = getpass() os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY llm =
PromptLayerOpenAI(pl_tags=["langchain"])
langchain_community.llms.PromptLayerOpenAI
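The tagged client logs every request to PromptLayer under the given tag; a sketch where the test prompt is illustrative:

llm = PromptLayerOpenAI(pl_tags=["langchain"])
llm.invoke("Tell me a short joke")  # request and response are recorded in PromptLayer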