prompt (stringlengths 70–19.8k) | completion (stringlengths 8–303) | api (stringlengths 23–93)
---|---|---|
get_ipython().run_line_magic('pip', 'install llama-index-llms-ai21')
get_ipython().system('pip install llama-index')
from llama_index.llms.ai21 import AI21
api_key = "Your api key"
resp = | AI21(api_key=api_key) | llama_index.llms.ai21.AI21 |
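# Hedged sketch of how the truncated row above might continue, assuming the
# standard llama-index LLM interface; the prompt text is illustrative.
llm = AI21(api_key=api_key)
resp = llm.complete("Paul Graham is ")
print(resp)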
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().system('pip install -q llama-index google-generativeai')
get_ipython().run_line_magic('env', 'GOOGLE_API_KEY=...')
import os
GOOGLE_API_KEY = "" # add your GOOGLE API key here
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from llama_index.llms.gemini import Gemini
resp = Gemini().complete("Write a poem about a magic backpack")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.gemini import Gemini
messages = [
ChatMessage(role="user", content="Hello friend!"),
ChatMessage(role="assistant", content="Yarr what is shakin' matey?"),
ChatMessage(
role="user", content="Help me decide what to have for dinner."
),
]
resp = Gemini().chat(messages)
print(resp)
from llama_index.llms.gemini import Gemini
llm = Gemini()
resp = llm.stream_complete(
"The story of Sourcrust, the bread creature, is really interesting. It all started when..."
)
for r in resp:
print(r.text, end="")
from llama_index.llms.gemini import Gemini
from llama_index.core.llms import ChatMessage
llm = Gemini()
messages = [
ChatMessage(role="user", content="Hello friend!"),
ChatMessage(role="assistant", content="Yarr what is shakin' matey?"),
ChatMessage(
role="user", content="Help me decide what to have for dinner."
),
]
resp = llm.stream_chat(messages)
for r in resp:
print(r.delta, end="")
import google.generativeai as genai
for m in genai.list_models():
if "generateContent" in m.supported_generation_methods:
print(m.name)
from llama_index.llms.gemini import Gemini
llm = | Gemini(model="models/gemini-pro") | llama_index.llms.gemini.Gemini |
get_ipython().run_line_magic('pip', 'install llama-index-llms-litellm')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-cohere')
get_ipython().system('pip install llama-index')
import os
cohere_api_key = "YOUR_API_KEY"
os.environ["COHERE_API_KEY"] = cohere_api_key
from llama_index.embeddings.cohere import CohereEmbedding
embed_model = CohereEmbedding(
cohere_api_key=cohere_api_key,
model_name="embed-english-v3.0",
input_type="search_query",
)
embeddings = embed_model.get_text_embedding("Hello CohereAI!")
print(len(embeddings))
print(embeddings[:5])
embed_model = CohereEmbedding(
cohere_api_key=cohere_api_key,
model_name="embed-english-v3.0",
input_type="search_document",
)
embeddings = embed_model.get_text_embedding("Hello CohereAI!")
print(len(embeddings))
print(embeddings[:5])
embed_model = CohereEmbedding(
cohere_api_key=cohere_api_key, model_name="embed-english-v2.0"
)
embeddings = embed_model.get_text_embedding("Hello CohereAI!")
print(len(embeddings))
print(embeddings[:5])
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.llms.litellm import LiteLLM
from llama_index.core.response.notebook_utils import display_source_node
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
llm = | LiteLLM("command-nightly") | llama_index.llms.litellm.LiteLLM |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().system('pip install llama-index')
from llama_index.core.node_parser import SimpleFileNodeParser
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
html_file = reader.load_data(Path("./stack-overflow.html"))
md_file = reader.load_data(Path("./README.md"))
print(html_file[0].metadata)
print(html_file[0])
print("----")
print(md_file[0].metadata)
print(md_file[0])
parser = | SimpleFileNodeParser() | llama_index.core.node_parser.SimpleFileNodeParser |
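# Hedged sketch of the parser's typical next step: split the loaded files into
# nodes. Printing the first node is illustrative.
parser = SimpleFileNodeParser()
md_nodes = parser.get_nodes_from_documents(md_file)
print(md_nodes[0])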
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface-optimum-intel')
from llama_index.embeddings.huggingface_optimum_intel import IntelEmbedding
embed_model = | IntelEmbedding("Intel/bge-small-en-v1.5-rag-int8-static") | llama_index.embeddings.huggingface_optimum_intel.IntelEmbedding |
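# Hedged usage sketch, assuming the standard embedding interface; the sample
# text is illustrative.
embed_model = IntelEmbedding("Intel/bge-small-en-v1.5-rag-int8-static")
embeddings = embed_model.get_text_embedding("Hello world!")
print(len(embeddings))
print(embeddings[:5])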
get_ipython().run_line_magic('pip', 'install llama-index-readers-mbox')
get_ipython().system('pip install llama-index')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=sk-************')
from llama_index.readers.mbox import MboxReader
from llama_index.core import VectorStoreIndex
documents = | MboxReader() | llama_index.readers.mbox.MboxReader |
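# Hedged continuation: MboxReader.load_data takes a directory of .mbox files;
# the "mbox_data_dir/" path is a placeholder assumption.
documents = MboxReader().load_data("mbox_data_dir/")
index = VectorStoreIndex.from_documents(documents)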
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
from pathlib import Path
import nest_asyncio
nest_asyncio.apply()
reader = | FlatReader() | llama_index.readers.file.FlatReader |
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf')
from llama_index.core import SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
from llama_index.core.evaluation import DatasetGenerator
documents = SimpleDirectoryReader(
input_files=["IPCC_AR6_WGII_Chapter03.pdf"]
).load_data()
import random
random.seed(42)
random.shuffle(documents)
gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
question_gen_query = (
"You are a Teacher/ Professor. Your task is to setup "
"a quiz/examination. Using the provided context from a "
"report on climate change and the oceans, formulate "
"a single question that captures an important fact from the "
"context. Restrict the question to the context information provided."
)
dataset_generator = DatasetGenerator.from_documents(
documents[:50],
question_gen_query=question_gen_query,
llm=gpt_35_llm,
)
questions = dataset_generator.generate_questions_from_nodes(num=40)
print("Generated ", len(questions), " questions")
with open("train_questions.txt", "w") as f:
for question in questions:
f.write(question + "\n")
dataset_generator = DatasetGenerator.from_documents(
documents[
50:
    ],  # the first 50 documents were used for the training questions, so skip them
question_gen_query=question_gen_query,
llm=gpt_35_llm,
)
questions = dataset_generator.generate_questions_from_nodes(num=40)
print("Generated ", len(questions), " questions")
with open("eval_questions.txt", "w") as f:
for question in questions:
f.write(question + "\n")
questions = []
with open("eval_questions.txt", "r") as f:
for line in f:
questions.append(line.strip())
from llama_index.core import VectorStoreIndex, Settings
Settings.context_window = 2048
gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(similarity_top_k=2, llm=gpt_35_llm)
contexts = []
answers = []
for question in questions:
response = query_engine.query(question)
contexts.append([x.node.get_content() for x in response.source_nodes])
answers.append(str(response))
from datasets import Dataset
from ragas import evaluate
from ragas.metrics import answer_relevancy, faithfulness
ds = Dataset.from_dict(
{
"question": questions,
"answer": answers,
"contexts": contexts,
}
)
result = evaluate(ds, [answer_relevancy, faithfulness])
print(result)
from llama_index.llms.openai import OpenAI
from llama_index.core.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
finetuning_handler = | OpenAIFineTuningHandler() | llama_index.core.callbacks.OpenAIFineTuningHandler |
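# Hedged sketch of wiring the fine-tuning handler into an LLM via a callback
# manager so that OpenAI calls are logged for later fine-tuning.
finetuning_handler = OpenAIFineTuningHandler()
callback_manager = CallbackManager([finetuning_handler])
llm = OpenAI(model="gpt-4", temperature=0.3)
llm.callback_manager = callback_manager
# after running queries, the logged events can be saved, e.g.:
# finetuning_handler.save_finetuning_events("finetuning_events.jsonl")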
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-tools-metaphor')
get_ipython().system('wget "https://images.openai.com/blob/a2e49de2-ba5b-4869-9c2d-db3b4b5dcc19/new-models-and-developer-products-announced-at-devday.jpg?width=2000" -O other_images/openai/dev_day.png')
get_ipython().system('wget "https://drive.google.com/uc\\?id\\=1B4f5ZSIKN0zTTPPRlZ915Ceb3_uF9Zlq\\&export\\=download" -O other_images/adidas.png')
from llama_index.readers.web import SimpleWebPageReader
url = "https://openai.com/blog/new-models-and-developer-products-announced-at-devday"
reader = SimpleWebPageReader(html_to_text=True)
documents = reader.load_data(urls=[url])
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core import Settings
Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
vector_index = VectorStoreIndex.from_documents(
documents,
)
query_tool = QueryEngineTool(
query_engine=vector_index.as_query_engine(),
metadata=ToolMetadata(
name=f"vector_tool",
description=(
"Useful to lookup new features announced by OpenAI"
),
),
)
from llama_index.core.agent.react_multimodal.step import (
MultimodalReActAgentWorker,
)
from llama_index.core.agent import AgentRunner
from llama_index.core.multi_modal_llms import MultiModalLLM
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core.agent import Task
mm_llm = | OpenAIMultiModal(model="gpt-4-vision-preview", max_new_tokens=1000) | llama_index.multi_modal_llms.openai.OpenAIMultiModal |
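# Hedged sketch of assembling the multimodal ReAct agent from the tool and LLM
# above, following the MultimodalReActAgentWorker pattern.
react_step_engine = MultimodalReActAgentWorker.from_tools(
    [query_tool],
    multi_modal_llm=mm_llm,
    verbose=True,
)
agent = AgentRunner(react_step_engine)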
get_ipython().system('pip install llama-index')
import logging
import sys
from IPython.display import Markdown, display
import pandas as pd
from llama_index.core.query_engine import PandasQueryEngine
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
df = pd.DataFrame(
{
"city": ["Toronto", "Tokyo", "Berlin"],
"population": [2930000, 13960000, 3645000],
}
)
query_engine = PandasQueryEngine(df=df, verbose=True)
response = query_engine.query(
"What is the city with the highest population?",
)
display(Markdown(f"<b>{response}</b>"))
print(response.metadata["pandas_instruction_str"])
query_engine = PandasQueryEngine(df=df, verbose=True, synthesize_response=True)
response = query_engine.query(
"What is the city with the highest population? Give both the city and population",
)
print(str(response))
get_ipython().system("wget 'https://raw.githubusercontent.com/jerryjliu/llama_index/main/docs/examples/data/csv/titanic_train.csv' -O 'titanic_train.csv'")
df = pd.read_csv("./titanic_train.csv")
query_engine = | PandasQueryEngine(df=df, verbose=True) | llama_index.core.query_engine.PandasQueryEngine |
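# Hedged continuation: query the Titanic frame the same way as the toy frame
# above; the question is illustrative.
response = query_engine.query(
    "What is the correlation between survival and age?",
)
display(Markdown(f"<b>{response}</b>"))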
get_ipython().system('pip install llama-index-multi-modal-llms-ollama')
get_ipython().system('pip install llama-index-readers-file')
get_ipython().system('pip install unstructured')
get_ipython().system('pip install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index-vector-stores-qdrant')
get_ipython().system('pip install llama-index-embeddings-clip')
from llama_index.multi_modal_llms.ollama import OllamaMultiModal
mm_model = OllamaMultiModal(model="llava:13b")
from pathlib import Path
from llama_index.core import SimpleDirectoryReader
from PIL import Image
import matplotlib.pyplot as plt
input_image_path = Path("restaurant_images")
if not input_image_path.exists():
    input_image_path.mkdir()
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png')
image_documents = SimpleDirectoryReader("./restaurant_images").load_data()
imageUrl = "./restaurant_images/fried_chicken.png"
image = Image.open(imageUrl).convert("RGB")
plt.figure(figsize=(16, 5))
plt.imshow(image)
from pydantic import BaseModel
class Restaurant(BaseModel):
"""Data model for an restaurant."""
restaurant: str
food: str
discount: str
price: str
rating: str
review: str
from llama_index.core.program import MultiModalLLMCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
prompt_template_str = """\
{query_str}
Return the answer as a Pydantic object. The Pydantic schema is given below:
"""
mm_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Restaurant),
image_documents=image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=mm_model,
verbose=True,
)
response = mm_program(query_str="Can you summarize what is in the image?")
for res in response:
print(res)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1THe1qqM61lretr9N3BmINc_NWDvuthYf" -O shanghai.jpg')
from pathlib import Path
from llama_index.readers.file import UnstructuredReader
from llama_index.core.schema import ImageDocument
loader = UnstructuredReader()
documents = loader.load_data(file=Path("tesla_2021_10k.htm"))
image_doc = ImageDocument(image_path="./shanghai.jpg")
from llama_index.core import VectorStoreIndex
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-m3")
vector_index = VectorStoreIndex.from_documents(
documents, embed_model=embed_model
)
query_engine = vector_index.as_query_engine()
from llama_index.core.prompts import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline, FnComponent
query_prompt_str = """\
Please expand the initial statement using the provided context from the Tesla 10K report.
{initial_statement}
"""
query_prompt_tmpl = PromptTemplate(query_prompt_str)
qp = QueryPipeline(
modules={
"mm_model": mm_model.as_query_component(
partial={"image_documents": [image_doc]}
),
"query_prompt": query_prompt_tmpl,
"query_engine": query_engine,
},
verbose=True,
)
qp.add_chain(["mm_model", "query_prompt", "query_engine"])
rag_response = qp.run("Which Tesla Factory is shown in the image?")
print(f"> Retrieval Augmented Response: {rag_response}")
rag_response.source_nodes[1].get_content()
get_ipython().system('wget "https://drive.usercontent.google.com/download?id=1qQDcaKuzgRGuEC1kxgYL_4mx7vG-v4gC&export=download&authuser=1&confirm=t&uuid=f944e95f-a31f-4b55-b68f-8ea67a6e90e5&at=APZUnTVZ6n1aOg7rtkcjBjw7Pt1D:1707010667927" -O mixed_wiki.zip')
get_ipython().system('unzip mixed_wiki.zip')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O ./mixed_wiki/tesla_2021_10k.htm')
from llama_index.core.indices.multi_modal.base import (
MultiModalVectorStoreIndex,
)
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.embeddings.clip import ClipEmbedding
import qdrant_client
client = qdrant_client.QdrantClient(path="qdrant_mm_db")
text_store = QdrantVectorStore(
client=client, collection_name="text_collection"
)
image_store = QdrantVectorStore(
client=client, collection_name="image_collection"
)
storage_context = StorageContext.from_defaults(
vector_store=text_store, image_store=image_store
)
image_embed_model = ClipEmbedding()
documents = | SimpleDirectoryReader("./mixed_wiki/") | llama_index.SimpleDirectoryReader |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic')
import nest_asyncio
nest_asyncio.apply()
from llama_index.core import SimpleDirectoryReader, Document
from llama_index.core import SummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.llms.anthropic import Anthropic
from llama_index.core.evaluation import CorrectnessEvaluator
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'")
uber_docs0 = SimpleDirectoryReader(
input_files=["./data/10k/uber_2021.pdf"]
).load_data()
uber_doc = Document(text="\n\n".join([d.get_content() for d in uber_docs0]))
from llama_index.core.utils import globals_helper
num_tokens = len(globals_helper.tokenizer(uber_doc.get_content()))
print(f"NUM TOKENS: {num_tokens}")
context_str = "Jerry's favorite snack is Hot Cheetos."
query_str = "What is Jerry's favorite snack?"
def augment_doc(doc_str, context, position):
"""Augment doc with additional context at a given position."""
doc_str1 = doc_str[:position]
doc_str2 = doc_str[position:]
return f"{doc_str1}...\n\n{context}\n\n...{doc_str2}"
test_str = augment_doc(
uber_doc.get_content(), context_str, int(0.5 * len(uber_doc.get_content()))
)
async def run_experiments(
doc, position_percentiles, context_str, query, llm, response_mode="compact"
):
eval_llm = OpenAI(model="gpt-4-1106-preview")
correctness_evaluator = | CorrectnessEvaluator(llm=eval_llm) | llama_index.core.evaluation.CorrectnessEvaluator |
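# Hedged standalone sketch of a single correctness evaluation (the function
# above is truncated); the response string is illustrative.
evaluator = CorrectnessEvaluator(llm=OpenAI(model="gpt-4-1106-preview"))
eval_result = evaluator.evaluate(
    query=query_str,
    response="Jerry's favorite snack is Hot Cheetos.",
    reference=context_str,
)
print(eval_result.score)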
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
llm = OpenAI(model="gpt-4")
splitter = SentenceSplitter(chunk_size=1024)
nodes = splitter.get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(
nodes=nodes,
storage_context=storage_context,
)
retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
from llama_index.core.response.notebook_utils import display_source_node
nodes = retriever.retrieve("What happened at Viaweb and Interleaf?")
for node in nodes:
display_source_node(node)
nodes = retriever.retrieve("What did Paul Graham do after RISD?")
for node in nodes:
display_source_node(node)
from llama_index.core.tools import RetrieverTool
vector_retriever = VectorIndexRetriever(index)
bm25_retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
retriever_tools = [
RetrieverTool.from_defaults(
retriever=vector_retriever,
description="Useful in most cases",
),
RetrieverTool.from_defaults(
retriever=bm25_retriever,
description="Useful if searching about specific information",
),
]
from llama_index.core.retrievers import RouterRetriever
retriever = RouterRetriever.from_defaults(
retriever_tools=retriever_tools,
llm=llm,
select_multi=True,
)
nodes = retriever.retrieve(
"Can you give me all the context regarding the author's life?"
)
for node in nodes:
display_source_node(node)
get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf')
from llama_index.core import (
VectorStoreIndex,
StorageContext,
SimpleDirectoryReader,
Document,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
documents = SimpleDirectoryReader(
input_files=["IPCC_AR6_WGII_Chapter03.pdf"]
).load_data()
llm = OpenAI(model="gpt-3.5-turbo")
splitter = SentenceSplitter(chunk_size=256)
nodes = splitter.get_nodes_from_documents(
[Document(text=documents[0].get_content()[:1000000])]
)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.retrievers.bm25 import BM25Retriever
vector_retriever = index.as_retriever(similarity_top_k=10)
bm25_retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=10)
from llama_index.core.retrievers import BaseRetriever
class HybridRetriever(BaseRetriever):
def __init__(self, vector_retriever, bm25_retriever):
self.vector_retriever = vector_retriever
self.bm25_retriever = bm25_retriever
super().__init__()
def _retrieve(self, query, **kwargs):
bm25_nodes = self.bm25_retriever.retrieve(query, **kwargs)
vector_nodes = self.vector_retriever.retrieve(query, **kwargs)
all_nodes = []
node_ids = set()
for n in bm25_nodes + vector_nodes:
if n.node.node_id not in node_ids:
all_nodes.append(n)
node_ids.add(n.node.node_id)
return all_nodes
index.as_retriever(similarity_top_k=5)
hybrid_retriever = HybridRetriever(vector_retriever, bm25_retriever)
get_ipython().system('pip install sentence-transformers')
from llama_index.core.postprocessor import SentenceTransformerRerank
reranker = SentenceTransformerRerank(top_n=4, model="BAAI/bge-reranker-base")
from llama_index.core import QueryBundle
retrieved_nodes = hybrid_retriever.retrieve(
"What is the impact of climate change on the ocean?"
)
reranked_nodes = reranker.postprocess_nodes(
    retrieved_nodes,
    query_bundle=QueryBundle(
        "What is the impact of climate change on the ocean?"
    ),
)
print("Initial retrieval: ", len(retrieved_nodes), " nodes")
print("Re-ranked retrieval: ", len(reranked_nodes), " nodes")
from llama_index.core.response.notebook_utils import display_source_node
for node in reranked_nodes:
display_source_node(node)
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine.from_args(
retriever=hybrid_retriever,
node_postprocessors=[reranker],
llm=llm,
)
response = query_engine.query(
"What is the impact of climate change on the ocean?"
)
from llama_index.core.response.notebook_utils import display_response
| display_response(response) | llama_index.core.response.notebook_utils.display_response |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.core import SummaryIndex
from llama_index.core.response.notebook_utils import display_response
from llama_index.llms.openai import OpenAI
from llama_index.core.node_parser import SentenceSplitter
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.core import Document
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
llm = OpenAI(model="gpt-4")
chunk_sizes = [128, 256, 512, 1024]
nodes_list = []
vector_indices = []
for chunk_size in chunk_sizes:
print(f"Chunk Size: {chunk_size}")
splitter = SentenceSplitter(chunk_size=chunk_size)
nodes = splitter.get_nodes_from_documents(docs)
for node in nodes:
node.metadata["chunk_size"] = chunk_size
node.excluded_embed_metadata_keys = ["chunk_size"]
node.excluded_llm_metadata_keys = ["chunk_size"]
nodes_list.append(nodes)
vector_index = VectorStoreIndex(nodes)
vector_indices.append(vector_index)
from llama_index.core.tools import RetrieverTool
from llama_index.core.schema import IndexNode
retriever_dict = {}
retriever_nodes = []
for chunk_size, vector_index in zip(chunk_sizes, vector_indices):
node_id = f"chunk_{chunk_size}"
node = IndexNode(
text=(
"Retrieves relevant context from the Llama 2 paper (chunk size"
f" {chunk_size})"
),
index_id=node_id,
)
retriever_nodes.append(node)
retriever_dict[node_id] = vector_index.as_retriever()
from llama_index.core.selectors import PydanticMultiSelector
from llama_index.core.retrievers import RouterRetriever
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(retriever_nodes)
retriever = RecursiveRetriever(
root_id="root",
retriever_dict={"root": summary_index.as_retriever(), **retriever_dict},
)
nodes = await retriever.aretrieve(
"Tell me about the main aspects of safety fine-tuning"
)
print(f"Number of nodes: {len(nodes)}")
for node in nodes:
print(node.node.metadata["chunk_size"])
print(node.node.get_text())
from llama_index.core.postprocessor import LLMRerank, SentenceTransformerRerank
from llama_index.postprocessor.cohere_rerank import CohereRerank
reranker = CohereRerank(top_n=10)
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine(retriever, node_postprocessors=[reranker])
response = query_engine.query(
"Tell me about the main aspects of safety fine-tuning"
)
display_response(
response, show_source=True, source_length=500, show_source_metadata=True
)
from collections import defaultdict
import pandas as pd
def mrr_all(metadata_values, metadata_key, source_nodes):
value_to_mrr_dict = {}
for metadata_value in metadata_values:
mrr = 0
for idx, source_node in enumerate(source_nodes):
if source_node.node.metadata[metadata_key] == metadata_value:
mrr = 1 / (idx + 1)
break
else:
continue
value_to_mrr_dict[metadata_value] = mrr
    df = pd.DataFrame(value_to_mrr_dict, index=["MRR"])
    # return the Styler so the caption is applied when displayed in a notebook
    return df.style.set_caption("Mean Reciprocal Rank")
print("Mean Reciprocal Rank for each Chunk Size")
mrr_all(chunk_sizes, "chunk_size", response.source_nodes)
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
import nest_asyncio
nest_asyncio.apply()
eval_llm = OpenAI(model="gpt-4")
dataset_generator = DatasetGenerator(
nodes_list[-1],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=2,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60)
eval_dataset.save_json("data/llama2_eval_qr_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
import asyncio
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
RelevancyEvaluator,
FaithfulnessEvaluator,
PairwiseComparisonEvaluator,
)
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm)
evaluator_r = RelevancyEvaluator(llm=eval_llm)
evaluator_f = FaithfulnessEvaluator(llm=eval_llm)
pairwise_evaluator = PairwiseComparisonEvaluator(llm=eval_llm)
from llama_index.core.evaluation.eval_utils import (
get_responses,
get_results_df,
)
from llama_index.core.evaluation import BatchEvalRunner
max_samples = 60
eval_qs = eval_dataset.questions
qr_pairs = eval_dataset.qr_pairs
ref_response_strs = [r for (_, r) in qr_pairs]
base_query_engine = vector_indices[-1].as_query_engine(similarity_top_k=2)
reranker = CohereRerank(top_n=4)
query_engine = RetrieverQueryEngine(retriever, node_postprocessors=[reranker])
base_pred_responses = get_responses(
eval_qs[:max_samples], base_query_engine, show_progress=True
)
pred_responses = get_responses(
eval_qs[:max_samples], query_engine, show_progress=True
)
import numpy as np
pred_response_strs = [str(p) for p in pred_responses]
base_pred_response_strs = [str(p) for p in base_pred_responses]
evaluator_dict = {
"correctness": evaluator_c,
"faithfulness": evaluator_f,
"semantic_similarity": evaluator_s,
}
batch_runner = | BatchEvalRunner(evaluator_dict, workers=1, show_progress=True) | llama_index.core.evaluation.BatchEvalRunner |
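# Hedged sketch: run the batch evaluators over the predicted responses against
# the reference answers, mirroring the evaluation pattern used later in this file.
eval_results = await batch_runner.aevaluate_responses(
    queries=eval_qs[:max_samples],
    responses=pred_responses[:max_samples],
    reference=ref_response_strs[:max_samples],
)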
get_ipython().system('pip install llama-index')
from llama_index.core.evaluation import SemanticSimilarityEvaluator
evaluator = | SemanticSimilarityEvaluator() | llama_index.core.evaluation.SemanticSimilarityEvaluator |
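# Hedged usage sketch, assuming the standard async evaluator interface; the
# response/reference strings are illustrative.
response = "The sky is typically blue"
reference = "The sky is blue"
result = await evaluator.aevaluate(response=response, reference=reference)
print("Score: ", result.score)
print("Passing: ", result.passing)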
get_ipython().run_line_magic('pip', 'install llama-index-llms-vertex')
from llama_index.llms.vertex import Vertex
from google.oauth2 import service_account
filename = "vertex-407108-37495ce6c303.json"
credentials: service_account.Credentials = (
service_account.Credentials.from_service_account_file(filename)
)
Vertex(
model="text-bison", project=credentials.project_id, credentials=credentials
)
from llama_index.llms.vertex import Vertex
from llama_index.core.llms import ChatMessage, MessageRole
llm = Vertex(model="text-bison", temperature=0, additional_kwargs={})
llm.complete("Hello this is a sample text").text
(await llm.acomplete("hello")).text
list(llm.stream_complete("hello"))[-1].text
chat = | Vertex(model="chat-bison") | llama_index.llms.vertex.Vertex |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
from pathlib import Path
import nest_asyncio
nest_asyncio.apply()
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
pipeline = IngestionPipeline(
documents=docs,
transformations=[
HTMLNodeParser.from_defaults(),
SentenceSplitter(chunk_size=1024, chunk_overlap=200),
OpenAIEmbedding(),
],
)
eval_nodes = pipeline.run(documents=docs)
eval_llm = OpenAI(model="gpt-3.5-turbo")
dataset_generator = DatasetGenerator(
eval_nodes[:100],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=3,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100)
len(eval_dataset.qr_pairs)
eval_dataset.save_json("data/tesla10k_eval_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/tesla10k_eval_dataset.json"
)
eval_qs = eval_dataset.questions
qr_pairs = eval_dataset.qr_pairs
ref_response_strs = [r for (_, r) in qr_pairs]
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
)
from llama_index.core.evaluation.eval_utils import (
get_responses,
get_results_df,
)
from llama_index.core.evaluation import BatchEvalRunner
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm)
evaluator_dict = {
"correctness": evaluator_c,
"semantic_similarity": evaluator_s,
}
batch_eval_runner = BatchEvalRunner(
evaluator_dict, workers=2, show_progress=True
)
from llama_index.core import VectorStoreIndex
async def run_evals(
pipeline, batch_eval_runner, docs, eval_qs, eval_responses_ref
):
nodes = pipeline.run(documents=docs)
vector_index = VectorStoreIndex(nodes)
query_engine = vector_index.as_query_engine()
pred_responses = | get_responses(eval_qs, query_engine, show_progress=True) | llama_index.core.evaluation.eval_utils.get_responses |
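    # Hedged continuation sketch: score the predictions against the references
    # with the batch runner and return the per-metric results.
    eval_results = await batch_eval_runner.aevaluate_responses(
        eval_qs, responses=pred_responses, reference=eval_responses_ref
    )
    return eval_results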
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-faiss')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import faiss
d = 1536
faiss_index = faiss.IndexFlatL2(d)
from llama_index.core import (
SimpleDirectoryReader,
load_index_from_storage,
VectorStoreIndex,
StorageContext,
)
from llama_index.vector_stores.faiss import FaissVectorStore
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = FaissVectorStore(faiss_index=faiss_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
index.storage_context.persist()
vector_store = | FaissVectorStore.from_persist_dir("./storage") | llama_index.vector_stores.faiss.FaissVectorStore.from_persist_dir |
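# Hedged sketch of reloading the persisted index, following the usual
# FAISS + StorageContext pattern.
vector_store = FaissVectorStore.from_persist_dir("./storage")
storage_context = StorageContext.from_defaults(
    vector_store=vector_store, persist_dir="./storage"
)
index = load_index_from_storage(storage_context=storage_context)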
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core.chat_engine import SimpleChatEngine
chat_engine = SimpleChatEngine.from_defaults()
chat_engine.chat_repl()
from llama_index.llms.openai import OpenAI
llm = OpenAI(temperature=0.0, model="gpt-3.5-turbo")
from llama_index.core.chat_engine import SimpleChatEngine
chat_engine = | SimpleChatEngine.from_defaults(llm=llm) | llama_index.core.chat_engine.SimpleChatEngine.from_defaults |
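# Hedged continuation: start the REPL with the customized LLM, mirroring the
# default example above.
chat_engine = SimpleChatEngine.from_defaults(llm=llm)
chat_engine.chat_repl()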
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import os
os.environ["OPENAI_API_KEY"] = "INSERT OPENAI KEY"
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
from llama_index.core import SimpleDirectoryReader, KnowledgeGraphIndex
from llama_index.core.graph_stores import SimpleGraphStore
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
from IPython.display import Markdown, display
documents = SimpleDirectoryReader(
"../../../../examples/paul_graham_essay/data"
).load_data()
llm = OpenAI(temperature=0, model="text-davinci-002")
Settings.llm = llm
Settings.chunk_size = 512
from llama_index.core import StorageContext
graph_store = | SimpleGraphStore() | llama_index.core.graph_stores.SimpleGraphStore |
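# Hedged sketch of the typical next step: build a knowledge graph index over
# the loaded documents using the in-memory graph store.
graph_store = SimpleGraphStore()
storage_context = StorageContext.from_defaults(graph_store=graph_store)
index = KnowledgeGraphIndex.from_documents(
    documents,
    max_triplets_per_chunk=2,
    storage_context=storage_context,
)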
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY_HERE"
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import weaviate
resource_owner_config = weaviate.AuthClientPassword(
username="<username>",
password="<password>",
)
client = weaviate.Client(
"https://llama-test-ezjahb4m.weaviate.network",
auth_client_secret=resource_owner_config,
)
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.weaviate import WeaviateVectorStore
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
from llama_index.core import StorageContext
vector_store = WeaviateVectorStore(
weaviate_client=client, index_name="LlamaIndex"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
resource_owner_config = weaviate.AuthClientPassword(
username="<username>",
password="<password>",
)
client = weaviate.Client(
"https://llama-test-ezjahb4m.weaviate.network",
auth_client_secret=resource_owner_config,
)
vector_store = WeaviateVectorStore(
weaviate_client=client, index_name="LlamaIndex"
)
loaded_index = VectorStoreIndex.from_vector_store(vector_store)
query_engine = loaded_index.as_query_engine()
response = query_engine.query("What happened at interleaf?")
display(Markdown(f"<b>{response}</b>"))
from llama_index.core import Document
doc = Document.example()
print(doc.metadata)
print("-----")
print(doc.text[:100])
loaded_index.insert(doc)
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
filters = MetadataFilters(
filters=[ | ExactMatchFilter(key="filename", value="README.md") | llama_index.core.vector_stores.ExactMatchFilter |
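# Hedged completion of the truncated filter example: apply the metadata filter
# (matching the example Document inserted above) when querying the loaded index.
filters = MetadataFilters(
    filters=[ExactMatchFilter(key="filename", value="README.md")]
)
query_engine = loaded_index.as_query_engine(filters=filters)
response = query_engine.query("What is the name of the file?")
display(Markdown(f"<b>{response}</b>"))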
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core.agent import (
CustomSimpleAgentWorker,
Task,
AgentChatResponse,
)
from typing import Dict, Any, List, Tuple, Optional
from llama_index.core.tools import BaseTool, QueryEngineTool
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.query_engine import RouterQueryEngine
from llama_index.core import ChatPromptTemplate, PromptTemplate
from llama_index.core.selectors import PydanticSingleSelector
from llama_index.core.bridge.pydantic import Field, BaseModel
from llama_index.core.llms import ChatMessage, MessageRole
DEFAULT_PROMPT_STR = """
Given previous question/response pairs, please determine if an error has occurred in the response, and suggest \
a modified question that will not trigger the error.
Examples of modified questions:
- The question itself is modified to elicit a non-erroneous response
- The question is augmented with context that will help the downstream system better answer the question.
- The question is augmented with examples of negative responses, or other negative questions.
An error means that either an exception was raised, or the response is completely irrelevant to the question.
Please return the evaluation of the response in the following JSON format.
"""
def get_chat_prompt_template(
system_prompt: str, current_reasoning: Tuple[str, str]
) -> ChatPromptTemplate:
system_msg = ChatMessage(role=MessageRole.SYSTEM, content=system_prompt)
messages = [system_msg]
for raw_msg in current_reasoning:
if raw_msg[0] == "user":
messages.append(
ChatMessage(role=MessageRole.USER, content=raw_msg[1])
)
else:
messages.append(
ChatMessage(role=MessageRole.ASSISTANT, content=raw_msg[1])
)
return ChatPromptTemplate(message_templates=messages)
class ResponseEval(BaseModel):
"""Evaluation of whether the response has an error."""
has_error: bool = Field(
..., description="Whether the response has an error."
)
new_question: str = Field(..., description="The suggested new question.")
explanation: str = Field(
...,
description=(
"The explanation for the error as well as for the new question."
"Can include the direct stack trace as well."
),
)
from llama_index.core.bridge.pydantic import PrivateAttr
class RetryAgentWorker(CustomSimpleAgentWorker):
"""Agent worker that adds a retry layer on top of a router.
    Continues iterating until there are no errors / the task is done.
"""
prompt_str: str = Field(default=DEFAULT_PROMPT_STR)
max_iterations: int = Field(default=10)
_router_query_engine: RouterQueryEngine = PrivateAttr()
def __init__(self, tools: List[BaseTool], **kwargs: Any) -> None:
"""Init params."""
for tool in tools:
if not isinstance(tool, QueryEngineTool):
raise ValueError(
f"Tool {tool.metadata.name} is not a query engine tool."
)
self._router_query_engine = RouterQueryEngine(
selector=PydanticSingleSelector.from_defaults(),
query_engine_tools=tools,
verbose=kwargs.get("verbose", False),
)
super().__init__(
tools=tools,
**kwargs,
)
def _initialize_state(self, task: Task, **kwargs: Any) -> Dict[str, Any]:
"""Initialize state."""
return {"count": 0, "current_reasoning": []}
def _run_step(
self, state: Dict[str, Any], task: Task, input: Optional[str] = None
) -> Tuple[AgentChatResponse, bool]:
"""Run step.
Returns:
Tuple of (agent_response, is_done)
"""
if "new_input" not in state:
new_input = task.input
else:
new_input = state["new_input"]
response = self._router_query_engine.query(new_input)
state["current_reasoning"].extend(
[("user", new_input), ("assistant", str(response))]
)
chat_prompt_tmpl = get_chat_prompt_template(
self.prompt_str, state["current_reasoning"]
)
llm_program = LLMTextCompletionProgram.from_defaults(
output_parser= | PydanticOutputParser(output_cls=ResponseEval) | llama_index.core.output_parsers.PydanticOutputParser |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openllm')
get_ipython().system('pip install "openllm" # use \'openllm[vllm]\' if you have access to GPU')
get_ipython().system('pip install llama-index')
import os
from typing import List, Optional
from llama_index.llms.openllm import OpenLLM, OpenLLMAPI
from llama_index.core.llms import ChatMessage
os.environ[
"OPENLLM_ENDPOINT"
] = "na" # Change this to a remote server that you might run OpenLLM at.
local_llm = OpenLLM("HuggingFaceH4/zephyr-7b-alpha")
remote_llm = OpenLLMAPI(address="http://localhost:3000")
remote_llm = | OpenLLMAPI() | llama_index.llms.openllm.OpenLLMAPI |
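# Hedged usage sketch for the remote client, assuming the OPENLLM_ENDPOINT
# environment variable is read when no address is passed; prompt is illustrative.
remote_llm = OpenLLMAPI()
completion_response = remote_llm.complete("To infinity, and")
print(completion_response)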
get_ipython().run_line_magic('pip', 'install llama-index-llms-fireworks')
get_ipython().run_line_magic('pip', 'install llama-index')
from llama_index.llms.fireworks import Fireworks
resp = Fireworks().complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.fireworks import Fireworks
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = Fireworks().chat(messages)
print(resp)
from llama_index.llms.fireworks import Fireworks
llm = Fireworks()
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
from llama_index.llms.fireworks import Fireworks
from llama_index.core.llms import ChatMessage
llm = Fireworks()
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = llm.stream_chat(messages)
for r in resp:
print(r.delta, end="")
from llama_index.llms.fireworks import Fireworks
llm = Fireworks(model="accounts/fireworks/models/firefunction-v1")
resp = llm.complete("Paul Graham is ")
print(resp)
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = llm.chat(messages)
print(resp)
from llama_index.llms.fireworks import Fireworks
llm = Fireworks(
model="accounts/fireworks/models/firefunction-v1", api_key="BAD_KEY"
)
resp = | Fireworks() | llama_index.llms.fireworks.Fireworks |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-experimental-param-tuner')
get_ipython().system('pip install llama-index llama-hub')
get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
import nest_asyncio
nest_asyncio.apply()
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.readers.file import UnstructuredReader
from llama_index.readers.file import PyMuPDFReader
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core.schema import IndexNode
get_ipython().system('wget "https://www.dropbox.com/scl/fi/fh9vsmmm8vu0j50l3ss38/llama2_eval_qr_dataset.json?rlkey=kkoaez7aqeb4z25gzc06ak6kb&dl=1" -O data/llama2_eval_qr_dataset.json')
from llama_index.core.evaluation import QueryResponseDataset
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
eval_qs = eval_dataset.questions
ref_response_strs = [r for (_, r) in eval_dataset.qr_pairs]
from llama_index.core import (
VectorStoreIndex,
load_index_from_storage,
StorageContext,
)
from llama_index.experimental.param_tuner import ParamTuner
from llama_index.core.param_tuner.base import TunedResult, RunResult
from llama_index.core.evaluation.eval_utils import (
get_responses,
aget_responses,
)
from llama_index.core.evaluation import (
SemanticSimilarityEvaluator,
BatchEvalRunner,
)
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
import os
import numpy as np
from pathlib import Path
def _build_index(chunk_size, docs):
index_out_path = f"./storage_{chunk_size}"
if not os.path.exists(index_out_path):
Path(index_out_path).mkdir(parents=True, exist_ok=True)
node_parser = | SimpleNodeParser.from_defaults(chunk_size=chunk_size) | llama_index.core.node_parser.SimpleNodeParser.from_defaults |
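# Hedged sketch of how _build_index might continue: build and persist the index
# on the first run, otherwise reload it from the persist directory.
        nodes = node_parser.get_nodes_from_documents(docs)
        index = VectorStoreIndex(nodes)
        index.storage_context.persist(index_out_path)
    else:
        storage_context = StorageContext.from_defaults(
            persist_dir=index_out_path
        )
        index = load_index_from_storage(storage_context)
    return index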
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core.schema import TextNode
from llama_index.core.indices.managed.types import ManagedIndexQueryMode
from llama_index.indices.managed.vectara import VectaraIndex
from llama_index.indices.managed.vectara import VectaraAutoRetriever
from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
from llama_index.llms.openai import OpenAI
nodes = [
TextNode(
text=(
"A pragmatic paleontologist touring an almost complete theme park on an island "
+ "in Central America is tasked with protecting a couple of kids after a power "
+ "failure causes the park's cloned dinosaurs to run loose."
),
metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"},
),
TextNode(
text=(
"A thief who steals corporate secrets through the use of dream-sharing technology "
+ "is given the inverse task of planting an idea into the mind of a C.E.O., "
+ "but his tragic past may doom the project and his team to disaster."
),
metadata={
"year": 2010,
"director": "Christopher Nolan",
"rating": 8.2,
},
),
TextNode(
text="Barbie suffers a crisis that leads her to question her world and her existence.",
metadata={
"year": 2023,
"director": "Greta Gerwig",
"genre": "fantasy",
"rating": 9.5,
},
),
TextNode(
text=(
"A cowboy doll is profoundly threatened and jealous when a new spaceman action "
+ "figure supplants him as top toy in a boy's bedroom."
),
metadata={"year": 1995, "genre": "animated", "rating": 8.3},
),
| TextNode(
text=(
"When Woody is stolen by a toy collector, Buzz and his friends set out on a "
+ "rescue mission to save Woody before he becomes a museum toy property with his "
+ "roundup gang Jessie, Prospector, and Bullseye. "
) | llama_index.core.schema.TextNode |
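# Hedged sketch of the typical next step once the truncated node list above is
# closed: build a Vectara-managed index from the nodes (Vectara API credentials
# are assumed to be set in the environment).
index = VectaraIndex(nodes=nodes)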
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
TABLE_NAME = os.environ["DYNAMODB_TABLE_NAME"]
from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore
from llama_index.storage.index_store.dynamodb import DynamoDBIndexStore
from llama_index.vector_stores.dynamodb import DynamoDBVectorStore
storage_context = StorageContext.from_defaults(
docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME),
index_store=DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME),
vector_store= | DynamoDBVectorStore.from_table_name(table_name=TABLE_NAME) | llama_index.vector_stores.dynamodb.DynamoDBVectorStore.from_table_name |
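# Hedged continuation: build the three indices over the shared DynamoDB-backed
# docstore, as in the standard multi-index storage pattern.
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
    nodes, storage_context=storage_context
)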
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-rankgpt-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-packs-infer-retrieve-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import datasets
dataset = datasets.load_dataset("BioDEX/BioDEX-ICSR")
dataset
from llama_index.core import get_tokenizer
import re
from typing import Set, List
tokenizer = get_tokenizer()
sample_size = 5
def get_reactions_row(raw_target: str) -> List[str]:
"""Get reactions from a single row."""
reaction_pattern = re.compile(r"reactions:\s*(.*)")
reaction_match = reaction_pattern.search(raw_target)
if reaction_match:
reactions = reaction_match.group(1).split(",")
reactions = [r.strip().lower() for r in reactions]
else:
reactions = []
return reactions
def get_reactions_set(dataset) -> Set[str]:
"""Get set of all reactions."""
reactions = set()
for data in dataset["train"]:
reactions.update(set(get_reactions_row(data["target"])))
return reactions
def get_samples(dataset, sample_size: int = 5):
"""Get processed sample.
Contains source text and also the reaction label.
Parse reaction text to specifically extract reactions.
"""
samples = []
for idx, data in enumerate(dataset["train"]):
if idx >= sample_size:
break
text = data["fulltext_processed"]
raw_target = data["target"]
reactions = get_reactions_row(raw_target)
samples.append({"text": text, "reactions": reactions})
return samples
from llama_index.packs.infer_retrieve_rerank import InferRetrieveRerankPack
from llama_index.core.llama_pack import download_llama_pack
InferRetrieveRerankPack = download_llama_pack(
"InferRetrieveRerankPack",
"./irr_pack",
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-16k")
pred_context = """\
The output predictions should be a list of comma-separated adverse \
drug reactions. \
"""
reranker_top_n = 10
pack = InferRetrieveRerankPack(
get_reactions_set(dataset),
llm=llm,
pred_context=pred_context,
reranker_top_n=reranker_top_n,
verbose=True,
)
samples = get_samples(dataset, sample_size=5)
pred_reactions = pack.run(inputs=[s["text"] for s in samples])
gt_reactions = [s["reactions"] for s in samples]
pred_reactions[2]
gt_reactions[2]
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.llms import LLM
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.postprocessor.rankgpt_rerank import RankGPTRerank
from llama_index.core.output_parsers import ChainableOutputParser
from typing import List
import random
all_reactions = get_reactions_set(dataset)
random.sample(sorted(all_reactions), 5)  # random.sample needs a sequence, not a set
from llama_index.core.schema import TextNode
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core import VectorStoreIndex
reaction_nodes = [TextNode(text=r) for r in all_reactions]
pipeline = IngestionPipeline(transformations=[OpenAIEmbedding()])
reaction_nodes = await pipeline.arun(documents=reaction_nodes)
index = VectorStoreIndex(reaction_nodes)
reaction_nodes[0].embedding
reaction_retriever = index.as_retriever(similarity_top_k=2)
nodes = reaction_retriever.retrieve("abdominal")
print([n.get_content() for n in nodes])
infer_prompt_str = """\
Your job is to output a list of predictions given context from a given piece of text. The text context
and information regarding the set of valid predictions are given below.
Return the predictions as a comma-separated list of strings.
Text Context:
{doc_context}
Prediction Info:
{pred_context}
Predictions: """
infer_prompt = PromptTemplate(infer_prompt_str)
class PredsOutputParser(ChainableOutputParser):
"""Predictions output parser."""
def parse(self, output: str) -> List[str]:
"""Parse predictions."""
tokens = output.split(",")
return [t.strip() for t in tokens]
preds_output_parser = PredsOutputParser()
rerank_str = """\
Given a piece of text, rank the {num} labels above based on their relevance \
to this piece of text. The labels \
should be listed in descending order using identifiers. \
The most relevant labels should be listed first. \
The output format should be [] > [], e.g., [1] > [2]. \
Only respond with the ranking results; \
do not add any other words or explanations. \
Here is a given piece of text: {query}.
"""
rerank_prompt = | PromptTemplate(rerank_str) | llama_index.core.PromptTemplate |
import os
from getpass import getpass
if os.getenv("OPENAI_API_KEY") is None:
os.environ["OPENAI_API_KEY"] = getpass(
"Paste your OpenAI key from:"
" https://platform.openai.com/account/api-keys\n"
)
assert os.getenv("OPENAI_API_KEY", "").startswith(
"sk-"
), "This doesn't look like a valid OpenAI API key"
print("OpenAI API key configured")
get_ipython().run_line_magic('pip', 'install -q html2text llama-index pandas pyarrow tqdm')
get_ipython().run_line_magic('pip', 'install -q llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install -q llama-index-callbacks-openinference')
import hashlib
import json
from pathlib import Path
import os
import textwrap
from typing import List, Union
import llama_index.core
from llama_index.readers.web import SimpleWebPageReader
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.callbacks import CallbackManager
from llama_index.callbacks.openinference import OpenInferenceCallbackHandler
from llama_index.callbacks.openinference.base import (
as_dataframe,
QueryData,
NodeData,
)
from llama_index.core.node_parser import SimpleNodeParser
import pandas as pd
from tqdm import tqdm
documents = SimpleWebPageReader().load_data(
[
"https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt"
]
)
print(documents[0].text)
parser = SentenceSplitter()
nodes = parser.get_nodes_from_documents(documents)
print(nodes[0].text)
callback_handler = OpenInferenceCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llama_index.core.Settings.callback_manager = callback_manager
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
max_characters_per_line = 80
queries = [
"What did Paul Graham do growing up?",
"When and how did Paul Graham's mother die?",
"What, in Paul Graham's opinion, is the most distinctive thing about YC?",
"When and how did Paul Graham meet Jessica Livingston?",
"What is Bel, and when and where was it written?",
]
for query in queries:
response = query_engine.query(query)
print("Query")
print("=====")
print(textwrap.fill(query, max_characters_per_line))
print()
print("Response")
print("========")
print(textwrap.fill(str(response), max_characters_per_line))
print()
query_data_buffer = callback_handler.flush_query_data_buffer()
query_dataframe = as_dataframe(query_data_buffer)
query_dataframe
class ParquetCallback:
def __init__(
self, data_path: Union[str, Path], max_buffer_length: int = 1000
):
self._data_path = Path(data_path)
self._data_path.mkdir(parents=True, exist_ok=False)
self._max_buffer_length = max_buffer_length
self._batch_index = 0
def __call__(
self,
query_data_buffer: List[QueryData],
node_data_buffer: List[NodeData],
) -> None:
if len(query_data_buffer) >= self._max_buffer_length:
query_dataframe = as_dataframe(query_data_buffer)
file_path = self._data_path / f"log-{self._batch_index}.parquet"
query_dataframe.to_parquet(file_path)
self._batch_index += 1
query_data_buffer.clear() # ⚠️ clear the buffer or it will keep growing forever!
node_data_buffer.clear() # didn't log node_data_buffer, but still need to clear it
data_path = "data"
parquet_writer = ParquetCallback(
data_path=data_path,
max_buffer_length=1,
)
callback_handler = OpenInferenceCallbackHandler(callback=parquet_writer)
callback_manager = | CallbackManager([callback_handler]) | llama_index.core.callbacks.CallbackManager |
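# A small sketch, assuming queries have been run and at least one batch has
# been flushed to disk: read the logged Parquet batches back into one
# dataframe for analysis.
query_log_dataframe = pd.concat(
    [pd.read_parquet(p) for p in sorted(Path(data_path).glob("log-*.parquet"))],
    ignore_index=True,
)
query_log_dataframe.head()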
get_ipython().system('pip install llama-index')
import openai
import os
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
openai.api_key = os.environ["OPENAI_API_KEY"]
from typing import Any, List
from InstructorEmbedding import INSTRUCTOR
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.embeddings import BaseEmbedding
class InstructorEmbeddings(BaseEmbedding):
_model: INSTRUCTOR = | PrivateAttr() | llama_index.core.bridge.pydantic.PrivateAttr |
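# A hedged sketch of how a custom BaseEmbedding subclass typically continues:
# implement the abstract query/text embedding hooks. The model name and
# instruction strings below are illustrative assumptions.
class InstructorEmbeddingsSketch(BaseEmbedding):
    _model: INSTRUCTOR = PrivateAttr()
    _instruction: str = PrivateAttr()
    def __init__(
        self,
        instructor_model_name: str = "hkunlp/instructor-large",
        instruction: str = "Represent a document for semantic search:",
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        self._model = INSTRUCTOR(instructor_model_name)
        self._instruction = instruction
    def _get_query_embedding(self, query: str) -> List[float]:
        return self._model.encode([[self._instruction, query]])[0].tolist()
    async def _aget_query_embedding(self, query: str) -> List[float]:
        return self._get_query_embedding(query)
    def _get_text_embedding(self, text: str) -> List[float]:
        return self._model.encode([[self._instruction, text]])[0].tolist()
    def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
        embeddings = self._model.encode(
            [[self._instruction, text] for text in texts]
        )
        return [e.tolist() for e in embeddings]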
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install rank-bm25 pymupdf')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
get_ipython().system('pip install llama-index')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
splitter = SentenceSplitter(chunk_size=1024)
index = VectorStoreIndex.from_documents(documents, transformations=[splitter])
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo")
from llama_index.core import PromptTemplate
query_str = "How do the models developed in this work compare to open-source chat models based on the benchmarks tested?"
query_gen_prompt_str = (
"You are a helpful assistant that generates multiple search queries based on a "
"single input query. Generate {num_queries} search queries, one on each line, "
"related to the following input query:\n"
"Query: {query}\n"
"Queries:\n"
)
query_gen_prompt = PromptTemplate(query_gen_prompt_str)
def generate_queries(llm, query_str: str, num_queries: int = 4):
fmt_prompt = query_gen_prompt.format(
num_queries=num_queries - 1, query=query_str
)
response = llm.complete(fmt_prompt)
queries = response.text.split("\n")
return queries
queries = generate_queries(llm, query_str, num_queries=4)
print(queries)
from tqdm.asyncio import tqdm
async def run_queries(queries, retrievers):
"""Run queries against retrievers."""
tasks = []
for query in queries:
for i, retriever in enumerate(retrievers):
tasks.append(retriever.aretrieve(query))
task_results = await tqdm.gather(*tasks)
results_dict = {}
for i, (query, query_result) in enumerate(zip(queries, task_results)):
results_dict[(query, i)] = query_result
return results_dict
from llama_index.core.retrievers import BM25Retriever
vector_retriever = index.as_retriever(similarity_top_k=2)
bm25_retriever = BM25Retriever.from_defaults(
docstore=index.docstore, similarity_top_k=2
)
results_dict = await run_queries(queries, [vector_retriever, bm25_retriever])
from typing import List
from llama_index.core.schema import NodeWithScore
def fuse_results(results_dict, similarity_top_k: int = 2):
"""Fuse results."""
k = 60.0 # `k` is a parameter used to control the impact of outlier rankings.
fused_scores = {}
text_to_node = {}
for nodes_with_scores in results_dict.values():
for rank, node_with_score in enumerate(
sorted(
nodes_with_scores, key=lambda x: x.score or 0.0, reverse=True
)
):
text = node_with_score.node.get_content()
text_to_node[text] = node_with_score
if text not in fused_scores:
fused_scores[text] = 0.0
fused_scores[text] += 1.0 / (rank + k)
reranked_results = dict(
sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)
)
reranked_nodes: List[NodeWithScore] = []
for text, score in reranked_results.items():
reranked_nodes.append(text_to_node[text])
reranked_nodes[-1].score = score
return reranked_nodes[:similarity_top_k]
final_results = fuse_results(results_dict)
from llama_index.core.response.notebook_utils import display_source_node
for n in final_results:
display_source_node(n, source_length=500)
import asyncio
from llama_index.core import QueryBundle
from llama_index.core.retrievers import BaseRetriever
from typing import Any, List
from llama_index.core.schema import NodeWithScore
class FusionRetriever(BaseRetriever):
    """Ensemble retriever with fusion."""
    def __init__(
        self,
        llm,
        retrievers: List[BaseRetriever],
        similarity_top_k: int = 2,
    ) -> None:
        """Init params."""
        self._llm = llm
        self._retrievers = retrievers
        self._similarity_top_k = similarity_top_k
        super().__init__()
    def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        """Retrieve."""
        queries = generate_queries(
            self._llm, query_bundle.query_str, num_queries=4
        )
        # run_queries is a coroutine; resolve it here (nest_asyncio is applied above)
        results_dict = asyncio.run(run_queries(queries, self._retrievers))
        final_results = fuse_results(
            results_dict, similarity_top_k=self._similarity_top_k
        )
        return final_results
from llama_index.core.query_engine import RetrieverQueryEngine
fusion_retriever = FusionRetriever(
llm, [vector_retriever, bm25_retriever], similarity_top_k=2
)
query_engine = | RetrieverQueryEngine(fusion_retriever) | llama_index.core.query_engine.RetrieverQueryEngine |
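# Run the original question through the fused retriever + query engine.
response = query_engine.query(query_str)
print(str(response))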
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import (
FixedRecencyPostprocessor,
EmbeddingRecencyPostprocessor,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import StorageContext
def get_file_metadata(file_name: str):
"""Get file metadata."""
if "v1" in file_name:
return {"date": "2020-01-01"}
elif "v2" in file_name:
return {"date": "2020-02-03"}
elif "v3" in file_name:
return {"date": "2022-04-12"}
else:
raise ValueError("invalid file")
documents = SimpleDirectoryReader(
input_files=[
"test_versioned_data/paul_graham_essay_v1.txt",
"test_versioned_data/paul_graham_essay_v2.txt",
"test_versioned_data/paul_graham_essay_v3.txt",
],
file_metadata=get_file_metadata,
).load_data()
from llama_index.core import Settings
Settings.text_splitter = SentenceSplitter(chunk_size=512)
nodes = | Settings.text_splitter.get_nodes_from_documents(documents) | llama_index.core.Settings.text_splitter.get_nodes_from_documents |
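# A minimal sketch, assuming the dated metadata above: index the nodes and
# attach a fixed-recency postprocessor so only the newest version survives.
index = VectorStoreIndex(nodes)
node_postprocessor = FixedRecencyPostprocessor(top_k=1)
query_engine = index.as_query_engine(
    similarity_top_k=3, node_postprocessors=[node_postprocessor]
)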
get_ipython().run_line_magic('pip', 'install llama-index-llms-replicate')
get_ipython().system('pip install llama-index')
from llama_index.llms.replicate import Replicate
from llama_index.core.llms.llama_utils import messages_to_prompt
llm_13b = Replicate(
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
context_window=4096,
messages_to_prompt=messages_to_prompt, # override message representation for llama 2
)
llm_70b = Replicate(
model="replicate/llama70b-v2-chat:e951f18578850b652510200860fc4ea62b3b16fac280f83ff32282f87bbd2e48",
context_window=4096,
messages_to_prompt=messages_to_prompt, # override message representation for llama 2
)
from llama_index.core.chat_engine import SimpleChatEngine
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.llms import ChatMessage
bot_70b = SimpleChatEngine(
llm=llm_70b,
memory= | ChatMemoryBuffer.from_defaults(llm=llm_70b) | llama_index.core.memory.ChatMemoryBuffer.from_defaults |
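# A hedged sketch of completing the chat engine and exchanging one message
# (the system prompt is illustrative).
bot_70b = SimpleChatEngine(
    llm=llm_70b,
    memory=ChatMemoryBuffer.from_defaults(llm=llm_70b),
    prefix_messages=[
        ChatMessage(role="system", content="You are a helpful assistant.")
    ],
)
response = bot_70b.chat("Hi, can you introduce yourself?")
print(response)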
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-docarray')
get_ipython().system('pip install llama-index')
import os
import sys
import logging
import textwrap
import warnings
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from llama_index.core import (
GPTVectorStoreIndex,
SimpleDirectoryReader,
Document,
)
from llama_index.vector_stores.docarray import DocArrayInMemoryVectorStore
from IPython.display import Markdown, display
import os
os.environ["OPENAI_API_KEY"] = "<your openai key>"
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(
"Document ID:",
documents[0].doc_id,
"Document Hash:",
documents[0].doc_hash,
)
from llama_index.core import StorageContext
vector_store = | DocArrayInMemoryVectorStore() | llama_index.vector_stores.docarray.DocArrayInMemoryVectorStore |
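# Standard next step for an in-memory DocArray store: wire it into a storage
# context, build the index, and run a query.
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = GPTVectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))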
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index')
from llama_index.core.ingestion.cache import RedisCache
from llama_index.core.ingestion import IngestionCache
ingest_cache = IngestionCache(
cache=RedisCache.from_host_and_port(host="127.0.0.1", port=6379),
collection="my_test_cache",
)
get_ipython().system('pip install weaviate-client')
import weaviate
auth_config = weaviate.AuthApiKey(api_key="...")
client = weaviate.Client(url="https://...", auth_client_secret=auth_config)
from llama_index.vector_stores.weaviate import WeaviateVectorStore
vector_store = WeaviateVectorStore(
weaviate_client=client, index_name="CachingTest"
)
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
text_splitter = | TokenTextSplitter(chunk_size=512) | llama_index.core.node_parser.TokenTextSplitter |
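# A hedged sketch, assuming `documents` has been loaded: run an ingestion
# pipeline that caches transformed nodes in Redis and writes to Weaviate.
from llama_index.core.ingestion import IngestionPipeline
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
pipeline = IngestionPipeline(
    transformations=[text_splitter, embed_model],
    vector_store=vector_store,
    cache=ingest_cache,
)
nodes = pipeline.run(documents=documents)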
get_ipython().run_line_magic('pip', 'install llama-index-readers-make-com')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.readers.make_com import MakeWrapper
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = | VectorStoreIndex.from_documents(documents=documents) | llama_index.core.VectorStoreIndex.from_documents |
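# Query the index, then forward the response to a Make.com webhook
# (the placeholder URL is illustrative).
query_str = "What did the author do growing up?"
query_engine = index.as_query_engine()
response = query_engine.query(query_str)
wrapper = MakeWrapper()
wrapper.pass_response_to_webhook("<webhook_url>", response, query_str)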
get_ipython().run_line_magic('pip', 'install llama-index-llms-ollama')
get_ipython().system('pip install llama-index')
from llama_index.llms.ollama import Ollama
llm = | Ollama(model="llama2", request_timeout=30.0) | llama_index.llms.ollama.Ollama |
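# Simple completion against the locally served Llama 2 model.
resp = llm.complete("Who is Paul Graham?")
print(resp)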
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('pip install llama-index')
get_ipython().system('pip install spacy')
wiki_titles = [
"Toronto",
"Seattle",
"Chicago",
"Boston",
"Houston",
"Tokyo",
"Berlin",
"Lisbon",
]
from pathlib import Path
import requests
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
data_path = Path("data")
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
from llama_index.core import SimpleDirectoryReader
city_docs = {}
for wiki_title in wiki_titles:
city_docs[wiki_title] = SimpleDirectoryReader(
input_files=[f"data/{wiki_title}.txt"]
).load_data()
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
city_descs_dict = {}
choices = []
choice_to_id_dict = {}
for idx, wiki_title in enumerate(wiki_titles):
vector_desc = (
"Useful for questions related to specific aspects of"
f" {wiki_title} (e.g. the history, arts and culture,"
" sports, demographics, or more)."
)
summary_desc = (
"Useful for any requests that require a holistic summary"
f" of EVERYTHING about {wiki_title}. For questions about"
" more specific sections, please use the vector_tool."
)
doc_id_vector = f"{wiki_title}_vector"
doc_id_summary = f"{wiki_title}_summary"
city_descs_dict[doc_id_vector] = vector_desc
city_descs_dict[doc_id_summary] = summary_desc
choices.extend([vector_desc, summary_desc])
choice_to_id_dict[idx * 2] = f"{wiki_title}_vector"
choice_to_id_dict[idx * 2 + 1] = f"{wiki_title}_summary"
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
llm = OpenAI(model_name="gpt-3.5-turbo")
summary_q_tmpl = """\
You are a summary question generator. Given an existing question which asks for a summary of a given topic, \
generate {num_vary} related queries that also ask for a summary of the topic.
For example, assuming we're generating 3 related questions:
Base Question: Can you tell me more about Boston?
Question Variations:
Give me an overview of Boston as a city.
Can you describe different aspects of Boston, from the history to the sports scene to the food?
Write a concise summary of Boston; I've never been.
Now let's give it a shot!
Base Question: {base_question}
Question Variations:
"""
summary_q_prompt = PromptTemplate(summary_q_tmpl)
from collections import defaultdict
from llama_index.core.evaluation import DatasetGenerator
from llama_index.core.evaluation import EmbeddingQAFinetuneDataset
from llama_index.core.node_parser import SimpleNodeParser
from tqdm.notebook import tqdm
def generate_dataset(
wiki_titles,
city_descs_dict,
llm,
summary_q_prompt,
num_vector_qs_per_node=2,
num_summary_qs=4,
):
queries = {}
corpus = {}
relevant_docs = defaultdict(list)
for idx, wiki_title in enumerate(tqdm(wiki_titles)):
doc_id_vector = f"{wiki_title}_vector"
doc_id_summary = f"{wiki_title}_summary"
corpus[doc_id_vector] = city_descs_dict[doc_id_vector]
corpus[doc_id_summary] = city_descs_dict[doc_id_summary]
node_parser = SimpleNodeParser.from_defaults()
nodes = node_parser.get_nodes_from_documents(city_docs[wiki_title])
dataset_generator = DatasetGenerator(
nodes,
llm=llm,
num_questions_per_chunk=num_vector_qs_per_node,
)
doc_questions = dataset_generator.generate_questions_from_nodes(
num=len(nodes) * num_vector_qs_per_node
)
for query_idx, doc_question in enumerate(doc_questions):
query_id = f"{wiki_title}_{query_idx}"
relevant_docs[query_id] = [doc_id_vector]
queries[query_id] = doc_question
base_q = f"Give me a summary of {wiki_title}"
fmt_prompt = summary_q_prompt.format(
num_vary=num_summary_qs,
base_question=base_q,
)
raw_response = llm.complete(fmt_prompt)
raw_lines = str(raw_response).split("\n")
doc_summary_questions = [l for l in raw_lines if l != ""]
print(f"[{idx}] Original Question: {base_q}")
print(
f"[{idx}] Generated Question Variations: {doc_summary_questions}"
)
for query_idx, doc_summary_question in enumerate(
doc_summary_questions
):
            query_id = f"{wiki_title}_summary_{query_idx}"  # avoid colliding with the vector query ids above
relevant_docs[query_id] = [doc_id_summary]
queries[query_id] = doc_summary_question
return EmbeddingQAFinetuneDataset(
queries=queries, corpus=corpus, relevant_docs=relevant_docs
)
dataset = generate_dataset(
wiki_titles,
city_descs_dict,
llm,
summary_q_prompt,
num_vector_qs_per_node=4,
num_summary_qs=5,
)
dataset.save_json("dataset.json")
dataset = | EmbeddingQAFinetuneDataset.from_json("dataset.json") | llama_index.core.evaluation.EmbeddingQAFinetuneDataset.from_json |
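# A hedged sketch of fine-tuning an embedding model on the generated dataset
# (the base model id and output path are illustrative assumptions).
from llama_index.finetuning import SentenceTransformersFinetuneEngine
finetune_engine = SentenceTransformersFinetuneEngine(
    dataset,
    model_id="BAAI/bge-small-en",
    model_output_path="test_model",
)
finetune_engine.finetune()
embed_model = finetune_engine.get_finetuned_model()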
get_ipython().run_line_magic('pip', 'install llama-index-llms-openrouter')
get_ipython().system('pip install llama-index')
from llama_index.llms.openrouter import OpenRouter
from llama_index.core.llms import ChatMessage
llm = OpenRouter(
api_key="<your-api-key>",
max_tokens=256,
context_window=4096,
model="gryphe/mythomax-l2-13b",
)
message = ChatMessage(role="user", content="Tell me a joke")
resp = llm.chat([message])
print(resp)
message = ChatMessage(role="user", content="Tell me a story in 250 words")
resp = llm.stream_chat([message])
for r in resp:
print(r.delta, end="")
resp = llm.complete("Tell me a joke")
print(resp)
resp = llm.stream_complete("Tell me a story in 250 words")
for r in resp:
print(r.delta, end="")
llm = | OpenRouter(model="mistralai/mixtral-8x7b-instruct") | llama_index.llms.openrouter.OpenRouter |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-.."
openai.api_key = os.environ["OPENAI_API_KEY"]
from IPython.display import Markdown, display
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
)
engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
from llama_index.core import SQLDatabase
from llama_index.llms.openai import OpenAI
llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo")
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{
"city_name": "Chicago",
"population": 2679000,
"country": "United States",
},
{"city_name": "Seoul", "population": 9776000, "country": "South Korea"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
stmt = select(
city_stats_table.c.city_name,
city_stats_table.c.population,
city_stats_table.c.country,
).select_from(city_stats_table)
with engine.connect() as connection:
results = connection.execute(stmt).fetchall()
print(results)
from sqlalchemy import text
with engine.connect() as con:
rows = con.execute(text("SELECT city_name from city_stats"))
for row in rows:
print(row)
from llama_index.core.query_engine import NLSQLTableQueryEngine
query_engine = NLSQLTableQueryEngine(
sql_database=sql_database, tables=["city_stats"], llm=llm
)
query_str = "Which city has the highest population?"
response = query_engine.query(query_str)
display(Markdown(f"<b>{response}</b>"))
from llama_index.core.indices.struct_store.sql_query import (
SQLTableRetrieverQueryEngine,
)
from llama_index.core.objects import (
SQLTableNodeMapping,
ObjectIndex,
SQLTableSchema,
)
from llama_index.core import VectorStoreIndex
table_node_mapping = | SQLTableNodeMapping(sql_database) | llama_index.core.objects.SQLTableNodeMapping |
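# Standard continuation: build an object index over the table schemas so the
# relevant table can be retrieved at query time.
table_schema_objs = [SQLTableSchema(table_name="city_stats")]
obj_index = ObjectIndex.from_objects(
    table_schema_objs,
    table_node_mapping,
    VectorStoreIndex,
)
query_engine = SQLTableRetrieverQueryEngine(
    sql_database, obj_index.as_retriever(similarity_top_k=1)
)
response = query_engine.query("Which city has the highest population?")
display(Markdown(f"<b>{response}</b>"))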
get_ipython().run_line_magic('pip', 'install llama-index-llms-portkey')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install -U llama_index')
get_ipython().system('pip install -U portkey-ai')
from llama_index.llms.portkey import Portkey
from llama_index.core.llms import ChatMessage
import portkey as pk
import os
os.environ["PORTKEY_API_KEY"] = "PORTKEY_API_KEY"
openai_virtual_key_a = ""
openai_virtual_key_b = ""
anthropic_virtual_key_a = ""
anthropic_virtual_key_b = ""
cohere_virtual_key_a = ""
cohere_virtual_key_b = ""
os.environ["OPENAI_API_KEY"] = ""
os.environ["ANTHROPIC_API_KEY"] = ""
portkey_client = Portkey(
mode="single",
)
openai_llm = pk.LLMOptions(
provider="openai",
model="gpt-4",
virtual_key=openai_virtual_key_a,
)
portkey_client.add_llms(openai_llm)
messages = [
ChatMessage(role="system", content="You are a helpful assistant"),
ChatMessage(role="user", content="What can you do?"),
]
print("Testing Portkey Llamaindex integration:")
response = portkey_client.chat(messages)
print(response)
prompt = "Why is the sky blue?"
print("\nTesting Stream Complete:\n")
response = portkey_client.stream_complete(prompt)
for i in response:
print(i.delta, end="", flush=True)
messages = [
ChatMessage(role="system", content="You are a helpful assistant"),
ChatMessage(role="user", content="What can you do?"),
]
print("\nTesting Stream Chat:\n")
response = portkey_client.stream_chat(messages)
for i in response:
print(i.delta, end="", flush=True)
portkey_client = Portkey(mode="fallback")
messages = [
ChatMessage(role="system", content="You are a helpful assistant"),
ChatMessage(role="user", content="What can you do?"),
]
llm1 = pk.LLMOptions(
provider="openai",
model="gpt-4",
retry_settings={"on_status_codes": [429, 500], "attempts": 2},
virtual_key=openai_virtual_key_a,
)
llm2 = pk.LLMOptions(
provider="openai",
model="gpt-3.5-turbo",
virtual_key=openai_virtual_key_b,
)
portkey_client.add_llms(llm_params=[llm1, llm2])
print("Testing Fallback & Retry functionality:")
response = portkey_client.chat(messages)
print(response)
portkey_client = Portkey(mode="ab_test")
messages = [
ChatMessage(role="system", content="You are a helpful assistant"),
ChatMessage(role="user", content="What can you do?"),
]
llm1 = pk.LLMOptions(
provider="openai",
model="gpt-4",
virtual_key=openai_virtual_key_a,
weight=0.2,
)
llm2 = pk.LLMOptions(
provider="openai",
model="gpt-3.5-turbo",
virtual_key=openai_virtual_key_a,
weight=0.8,
)
portkey_client.add_llms(llm_params=[llm1, llm2])
print("Testing Loadbalance functionality:")
response = portkey_client.chat(messages)
print(response)
import time
portkey_client = | Portkey(mode="single") | llama_index.llms.portkey.Portkey |
get_ipython().run_line_magic('pip', 'install llama-index-readers-database')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from __future__ import absolute_import
import os
os.environ["OPENAI_API_KEY"] = ""
from llama_index.readers.database import DatabaseReader
from llama_index.core import VectorStoreIndex
db = DatabaseReader(
scheme="postgresql", # Database Scheme
host="localhost", # Database Host
port="5432", # Database Port
user="postgres", # Database User
password="FakeExamplePassword", # Database Password
dbname="postgres", # Database Name
)
print(type(db))
print(type(db.load_data))
print(type(db.sql_database))
print(type(db.sql_database.from_uri))
print(type(db.sql_database.get_single_table_info))
print(type(db.sql_database.get_table_columns))
print(type(db.sql_database.get_usable_table_names))
print(type(db.sql_database.insert_into_table))
print(type(db.sql_database.run_sql))
print(type(db.sql_database.dialect))
print(type(db.sql_database.engine))
print(type(db.sql_database))
db_from_sql_database = DatabaseReader(sql_database=db.sql_database)
print(type(db_from_sql_database))
print(type(db.sql_database.engine))
db_from_engine = DatabaseReader(engine=db.sql_database.engine)
print(type(db_from_engine))
print(type(db.uri))
db_from_uri = DatabaseReader(uri=db.uri)
print(type(db_from_uri))
query = f"""
SELECT
CONCAT(name, ' is ', age, ' years old.') AS text
FROM public.users
WHERE age >= 18
"""
texts = db.sql_database.run_sql(command=query)
print(type(texts))
print(texts)
documents = db.load_data(query=query)
print(type(documents))
print(documents)
index = | VectorStoreIndex.from_documents(documents) | llama_index.core.VectorStoreIndex.from_documents |
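# Query the index built from the SQL rows (the question is illustrative,
# matching the `users` table selected above).
query_engine = index.as_query_engine()
response = query_engine.query("Who is the oldest user?")
print(response)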
get_ipython().run_line_magic('pip', 'install llama-index-evaluation-tonic-validate')
import json
import pandas as pd
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.evaluation.tonic_validate import (
AnswerConsistencyEvaluator,
AnswerSimilarityEvaluator,
AugmentationAccuracyEvaluator,
AugmentationPrecisionEvaluator,
RetrievalPrecisionEvaluator,
TonicValidateEvaluator,
)
question = "What makes Sam Altman a good founder?"
reference_answer = "He is smart and has a great force of will."
llm_answer = "He is a good founder because he is smart."
retrieved_context_list = [
"Sam Altman is a good founder. He is very smart.",
"What makes Sam Altman such a good founder is his great force of will.",
]
answer_similarity_evaluator = AnswerSimilarityEvaluator()
score = await answer_similarity_evaluator.aevaluate(
question,
llm_answer,
retrieved_context_list,
reference_response=reference_answer,
)
score
answer_consistency_evaluator = AnswerConsistencyEvaluator()
score = await answer_consistency_evaluator.aevaluate(
question, llm_answer, retrieved_context_list
)
score
augmentation_accuracy_evaluator = AugmentationAccuracyEvaluator()
score = await augmentation_accuracy_evaluator.aevaluate(
question, llm_answer, retrieved_context_list
)
score
augmentation_precision_evaluator = AugmentationPrecisionEvaluator()
score = await augmentation_precision_evaluator.aevaluate(
question, llm_answer, retrieved_context_list
)
score
retrieval_precision_evaluator = RetrievalPrecisionEvaluator()
score = await retrieval_precision_evaluator.aevaluate(
question, llm_answer, retrieved_context_list
)
score
tonic_validate_evaluator = | TonicValidateEvaluator() | llama_index.evaluation.tonic_validate.TonicValidateEvaluator |
get_ipython().run_line_magic('pip', 'install llama-index-evaluation-tonic-validate')
import json
import pandas as pd
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.evaluation.tonic_validate import (
AnswerConsistencyEvaluator,
AnswerSimilarityEvaluator,
AugmentationAccuracyEvaluator,
AugmentationPrecisionEvaluator,
RetrievalPrecisionEvaluator,
TonicValidateEvaluator,
)
question = "What makes Sam Altman a good founder?"
reference_answer = "He is smart and has a great force of will."
llm_answer = "He is a good founder because he is smart."
retrieved_context_list = [
"Sam Altman is a good founder. He is very smart.",
"What makes Sam Altman such a good founder is his great force of will.",
]
answer_similarity_evaluator = AnswerSimilarityEvaluator()
score = await answer_similarity_evaluator.aevaluate(
question,
llm_answer,
retrieved_context_list,
reference_response=reference_answer,
)
score
answer_consistency_evaluator = AnswerConsistencyEvaluator()
score = await answer_consistency_evaluator.aevaluate(
question, llm_answer, retrieved_context_list
)
score
augmentation_accuracy_evaluator = AugmentationAccuracyEvaluator()
score = await augmentation_accuracy_evaluator.aevaluate(
question, llm_answer, retrieved_context_list
)
score
augmentation_precision_evaluator = AugmentationPrecisionEvaluator()
score = await augmentation_precision_evaluator.aevaluate(
question, llm_answer, retrieved_context_list
)
score
retrieval_precision_evaluator = RetrievalPrecisionEvaluator()
score = await retrieval_precision_evaluator.aevaluate(
question, llm_answer, retrieved_context_list
)
score
tonic_validate_evaluator = TonicValidateEvaluator()
scores = await tonic_validate_evaluator.aevaluate(
question,
llm_answer,
retrieved_context_list,
reference_response=reference_answer,
)
scores.score_dict
tonic_validate_evaluator = | TonicValidateEvaluator() | llama_index.evaluation.tonic_validate.TonicValidateEvaluator |
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install duckdb duckdb-engine')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SQLDatabase, SimpleDirectoryReader, Document
from llama_index.readers.wikipedia import WikipediaReader
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.indices.struct_store import SQLTableRetrieverQueryEngine
from IPython.display import Markdown, display
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("duckdb:///:memory:")
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
metadata_obj.tables.keys()
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{
"city_name": "Chicago",
"population": 2679000,
"country": "United States",
},
{"city_name": "Seoul", "population": 9776000, "country": "South Korea"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
with engine.connect() as connection:
cursor = connection.exec_driver_sql("SELECT * FROM city_stats")
print(cursor.fetchall())
from llama_index.core import SQLDatabase
sql_database = | SQLDatabase(engine, include_tables=["city_stats"]) | llama_index.core.SQLDatabase |
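# Text-to-SQL over the DuckDB table via the natural-language SQL query engine.
query_engine = NLSQLTableQueryEngine(
    sql_database=sql_database, tables=["city_stats"]
)
response = query_engine.query("Which city has the highest population?")
display(Markdown(f"<b>{response}</b>"))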
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=')
get_ipython().run_line_magic('env', 'BRAINTRUST_API_KEY=')
get_ipython().run_line_magic('env', 'TOKENIZERS_PARALLELISM=true')  # needed to avoid a warning message from Chroma
get_ipython().run_line_magic('pip', 'install -U llama_hub llama_index braintrust autoevals pypdf pillow transformers torch torchvision')
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = | resolve_embed_model("local:BAAI/bge-small-en") | llama_index.core.embeddings.resolve_embed_model |
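# A minimal sketch: build a baseline index over the chunked nodes with the
# local embedding model and grab a retriever for comparison runs.
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
base_retriever = base_index.as_retriever(similarity_top_k=2)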
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import TimeWeightedPostprocessor
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.core.response.notebook_utils import display_response
from datetime import datetime, timedelta
from llama_index.core import StorageContext
now = datetime.now()
key = "__last_accessed__"
doc1 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v1.txt"]
).load_data()[0]
doc2 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v2.txt"]
).load_data()[0]
doc3 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v3.txt"]
).load_data()[0]
from llama_index.core import Settings
Settings.text_splitter = SentenceSplitter(chunk_size=512)
nodes1 = Settings.text_splitter.get_nodes_from_documents([doc1])
nodes2 = Settings.text_splitter.get_nodes_from_documents([doc2])
nodes3 = Settings.text_splitter.get_nodes_from_documents([doc3])
nodes1[14].metadata[key] = (now - timedelta(hours=3)).timestamp()
nodes1[14].excluded_llm_metadata_keys = [key]
nodes2[14].metadata[key] = (now - timedelta(hours=2)).timestamp()
nodes2[14].excluded_llm_metadata_keys = [key]
nodes3[14].metadata[key] = (now - timedelta(hours=1)).timestamp()
nodes3[14].excluded_llm_metadata_keys = [key]
docstore = SimpleDocumentStore()
nodes = [nodes1[14], nodes2[14], nodes3[14]]
docstore.add_documents(nodes)
storage_context = StorageContext.from_defaults(docstore=docstore)
index = VectorStoreIndex(nodes, storage_context=storage_context)
node_postprocessor = TimeWeightedPostprocessor(
time_decay=0.5, time_access_refresh=False, top_k=1
)
query_engine = index.as_query_engine(
similarity_top_k=3,
)
response = query_engine.query(
"How much did the author raise in seed funding from Idelle's husband"
" (Julian) for Viaweb?",
)
| display_response(response) | llama_index.core.response.notebook_utils.display_response |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-faiss')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import faiss
d = 1536
faiss_index = faiss.IndexFlatL2(d)
from llama_index.core import (
SimpleDirectoryReader,
load_index_from_storage,
VectorStoreIndex,
StorageContext,
)
from llama_index.vector_stores.faiss import FaissVectorStore
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = | FaissVectorStore(faiss_index=faiss_index) | llama_index.vector_stores.faiss.FaissVectorStore |
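# Wire the Faiss store into a storage context, build the index, and persist
# it to disk (defaults to ./storage).
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
index.storage_context.persist()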
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().system('pip install llama-index')
import pinecone
import os
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="us-west4-gcp-free")
import os
import getpass
import openai
openai.api_key = "sk-<your-key>"
try:
pinecone.create_index(
"quickstart-index", dimension=1536, metric="euclidean", pod_type="p1"
)
except Exception:
pass
pinecone_index = pinecone.Index("quickstart-index")
pinecone_index.delete(deleteAll=True, namespace="test")
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text=(
"Michael Jordan is a retired professional basketball player,"
" widely regarded as one of the greatest basketball players of all"
" time."
),
metadata={
"category": "Sports",
"country": "United States",
"gender": "male",
"born": 1963,
},
),
TextNode(
text=(
"Angelina Jolie is an American actress, filmmaker, and"
" humanitarian. She has received numerous awards for her acting"
" and is known for her philanthropic work."
),
metadata={
"category": "Entertainment",
"country": "United States",
"gender": "female",
"born": 1975,
},
),
TextNode(
text=(
"Elon Musk is a business magnate, industrial designer, and"
" engineer. He is the founder, CEO, and lead designer of SpaceX,"
" Tesla, Inc., Neuralink, and The Boring Company."
),
metadata={
"category": "Business",
"country": "United States",
"gender": "male",
"born": 1971,
},
),
TextNode(
text=(
"Rihanna is a Barbadian singer, actress, and businesswoman. She"
" has achieved significant success in the music industry and is"
" known for her versatile musical style."
),
metadata={
"category": "Music",
"country": "Barbados",
"gender": "female",
"born": 1988,
},
),
TextNode(
text=(
"Cristiano Ronaldo is a Portuguese professional footballer who is"
" considered one of the greatest football players of all time. He"
" has won numerous awards and set multiple records during his"
" career."
),
metadata={
"category": "Sports",
"country": "Portugal",
"gender": "male",
"born": 1985,
},
),
]
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index, namespace="test"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.tools import FunctionTool
from llama_index.core.vector_stores import (
VectorStoreInfo,
MetadataInfo,
MetadataFilter,
MetadataFilters,
FilterCondition,
FilterOperator,
)
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from typing import List, Tuple, Any
from pydantic import BaseModel, Field
top_k = 3
vector_store_info = VectorStoreInfo(
content_info="brief biography of celebrities",
metadata_info=[
MetadataInfo(
name="category",
type="str",
description=(
"Category of the celebrity, one of [Sports, Entertainment,"
" Business, Music]"
),
),
MetadataInfo(
name="country",
type="str",
description=(
"Country of the celebrity, one of [United States, Barbados,"
" Portugal]"
),
),
MetadataInfo(
name="gender",
type="str",
description=("Gender of the celebrity, one of [male, female]"),
),
MetadataInfo(
name="born",
type="int",
description=("Born year of the celebrity, could be any integer"),
),
],
)
class AutoRetrieveModel(BaseModel):
query: str = Field(..., description="natural language query string")
filter_key_list: List[str] = Field(
..., description="List of metadata filter field names"
)
filter_value_list: List[Any] = Field(
...,
description=(
"List of metadata filter field values (corresponding to names"
" specified in filter_key_list)"
),
)
filter_operator_list: List[str] = Field(
...,
description=(
"Metadata filters conditions (could be one of <, <=, >, >=, ==, !=)"
),
)
filter_condition: str = Field(
...,
description=("Metadata filters condition values (could be AND or OR)"),
)
description = f"""\
Use this tool to look up biographical information about celebrities.
The vector database schema is given below:
{vector_store_info.json()}
"""
def auto_retrieve_fn(
query: str,
filter_key_list: List[str],
    filter_value_list: List[Any],
filter_operator_list: List[str],
filter_condition: str,
):
"""Auto retrieval function.
Performs auto-retrieval from a vector database, and then applies a set of filters.
"""
query = query or "Query"
metadata_filters = [
MetadataFilter(key=k, value=v, operator=op)
for k, v, op in zip(
filter_key_list, filter_value_list, filter_operator_list
)
]
retriever = VectorIndexRetriever(
index,
filters=MetadataFilters(
filters=metadata_filters, condition=filter_condition
),
        similarity_top_k=top_k,
)
query_engine = RetrieverQueryEngine.from_args(retriever)
response = query_engine.query(query)
return str(response)
auto_retrieve_tool = FunctionTool.from_defaults(
fn=auto_retrieve_fn,
name="celebrity_bios",
description=description,
fn_schema=AutoRetrieveModel,
)
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
agent = OpenAIAgent.from_tools(
[auto_retrieve_tool],
llm=OpenAI(temperature=0, model="gpt-4-0613"),
verbose=True,
)
response = agent.chat("Tell me about two celebrities from the United States. ")
print(str(response))
response = agent.chat("Tell me about two celebrities born after 1980. ")
print(str(response))
response = agent.chat(
"Tell me about few celebrities under category business and born after 1950. "
)
print(str(response))
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
from llama_index.core import SQLDatabase
from llama_index.core.indices import SQLStructStoreIndex
engine = create_engine("sqlite:///:memory:", future=True)
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
metadata_obj.tables.keys()
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{"city_name": "Berlin", "population": 3645000, "country": "Germany"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
with engine.connect() as connection:
cursor = connection.exec_driver_sql("SELECT * FROM city_stats")
print(cursor.fetchall())
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
from llama_index.core.query_engine import NLSQLTableQueryEngine
query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["city_stats"],
)
get_ipython().system('pip install wikipedia')
from llama_index.readers.wikipedia import WikipediaReader
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
cities = ["Toronto", "Berlin", "Tokyo"]
wiki_docs = WikipediaReader().load_data(pages=cities)
import pinecone
import os
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="us-west1-gcp")
pinecone_index = pinecone.Index("quickstart")
pinecone_index.delete(deleteAll=True)
from llama_index.core import Settings
from llama_index.core import StorageContext
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.llms.openai import OpenAI
Settings.llm = OpenAI(temperature=0, model="gpt-4")
Settings.node_parser = | TokenTextSplitter(chunk_size=1024) | llama_index.core.node_parser.TokenTextSplitter |
get_ipython().run_line_magic('pip', 'install llama-index-llms-everlyai')
get_ipython().system('pip install llama-index')
from llama_index.llms.everlyai import EverlyAI
from llama_index.core.llms import ChatMessage
llm = EverlyAI(api_key="your-api-key")
message = ChatMessage(role="user", content="Tell me a joke")
resp = llm.chat([message])
print(resp)
message = | ChatMessage(role="user", content="Tell me a story in 250 words") | llama_index.core.llms.ChatMessage |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY_HERE"
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import weaviate
resource_owner_config = weaviate.AuthClientPassword(
username="<username>",
password="<password>",
)
client = weaviate.Client(
"https://llama-test-ezjahb4m.weaviate.network",
auth_client_secret=resource_owner_config,
)
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.weaviate import WeaviateVectorStore
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
from llama_index.core import StorageContext
vector_store = WeaviateVectorStore(
weaviate_client=client, index_name="LlamaIndex"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
resource_owner_config = weaviate.AuthClientPassword(
username="<username>",
password="<password>",
)
client = weaviate.Client(
"https://llama-test-ezjahb4m.weaviate.network",
auth_client_secret=resource_owner_config,
)
vector_store = WeaviateVectorStore(
weaviate_client=client, index_name="LlamaIndex"
)
loaded_index = | VectorStoreIndex.from_vector_store(vector_store) | llama_index.core.VectorStoreIndex.from_vector_store |
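# Query the index re-loaded from the existing Weaviate collection.
query_engine = loaded_index.as_query_engine()
response = query_engine.query("What happened at Interleaf?")
display(Markdown(f"<b>{response}</b>"))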
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core.schema import TextNode
from llama_index.core.indices.managed.types import ManagedIndexQueryMode
from llama_index.indices.managed.vectara import VectaraIndex
from llama_index.indices.managed.vectara import VectaraAutoRetriever
from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
from llama_index.llms.openai import OpenAI
nodes = [
| TextNode(
text=(
"A pragmatic paleontologist touring an almost complete theme park on an island "
+ "in Central America is tasked with protecting a couple of kids after a power "
+ "failure causes the park's cloned dinosaurs to run loose."
) | llama_index.core.schema.TextNode |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import camelot
from llama_index.core import VectorStoreIndex
from llama_index.core.query_engine import PandasQueryEngine
from llama_index.core.schema import IndexNode
from llama_index.llms.openai import OpenAI
from llama_index.readers.file import PyMuPDFReader
from typing import List
import os
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = | OpenAIEmbedding(model="text-embedding-3-small") | llama_index.embeddings.openai.OpenAIEmbedding |
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic')
get_ipython().system('pip install llama-index')
from llama_index.llms.anthropic import Anthropic
from llama_index.core import Settings
tokenizer = Anthropic().tokenizer
Settings.tokenizer = tokenizer
import os
os.environ["ANTHROPIC_API_KEY"] = "YOUR ANTHROPIC API KEY"
from llama_index.llms.anthropic import Anthropic
llm = Anthropic(model="claude-3-opus-20240229")
resp = llm.complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.anthropic import Anthropic
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="Tell me a story"),
]
resp = Anthropic(model="claude-3-opus-20240229").chat(messages)
print(resp)
from llama_index.llms.anthropic import Anthropic
llm = Anthropic(model="claude-3-opus-20240229", max_tokens=100)
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
from llama_index.llms.anthropic import Anthropic
llm = Anthropic(model="claude-3-opus-20240229")
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="Tell me a story"),
]
resp = llm.stream_chat(messages)
for r in resp:
print(r.delta, end="")
from llama_index.llms.anthropic import Anthropic
llm = Anthropic(model="claude-3-sonnet-20240229")
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
from llama_index.llms.anthropic import Anthropic
llm = | Anthropic("claude-3-sonnet-20240229") | llama_index.llms.anthropic.Anthropic |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import os
os.environ["OPENAI_API_KEY"] = "INSERT OPENAI KEY"
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
from llama_index.core import SimpleDirectoryReader, KnowledgeGraphIndex
from llama_index.core.graph_stores import SimpleGraphStore
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
from IPython.display import Markdown, display
documents = SimpleDirectoryReader(
"../../../../examples/paul_graham_essay/data"
).load_data()
llm = OpenAI(temperature=0, model="text-davinci-002")
Settings.llm = llm
Settings.chunk_size = 512
from llama_index.core import StorageContext
graph_store = SimpleGraphStore()
storage_context = StorageContext.from_defaults(graph_store=graph_store)
index = KnowledgeGraphIndex.from_documents(
documents,
max_triplets_per_chunk=2,
storage_context=storage_context,
)
query_engine = index.as_query_engine(
include_text=False, response_mode="tree_summarize"
)
response = query_engine.query(
"Tell me more about Interleaf",
)
display(Markdown(f"<b>{response}</b>"))
query_engine = index.as_query_engine(
include_text=True, response_mode="tree_summarize"
)
response = query_engine.query(
"Tell me more about what the author worked on at Interleaf",
)
display(Markdown(f"<b>{response}</b>"))
new_index = KnowledgeGraphIndex.from_documents(
documents,
max_triplets_per_chunk=2,
include_embeddings=True,
)
query_engine = index.as_query_engine(
include_text=True,
response_mode="tree_summarize",
embedding_mode="hybrid",
similarity_top_k=5,
)
response = query_engine.query(
"Tell me more about what the author worked on at Interleaf",
)
display(Markdown(f"<b>{response}</b>"))
from pyvis.network import Network
g = index.get_networkx_graph()
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(g)
net.show("example.html")
from llama_index.core.node_parser import SentenceSplitter
node_parser = | SentenceSplitter() | llama_index.core.node_parser.SentenceSplitter |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import camelot
from llama_index.core import VectorStoreIndex
from llama_index.core.query_engine import PandasQueryEngine
from llama_index.core.schema import IndexNode
from llama_index.llms.openai import OpenAI
from llama_index.readers.file import PyMuPDFReader
from typing import List
import os
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
file_path = "billionaires_page.pdf"
reader = PyMuPDFReader()
docs = reader.load(file_path)
def get_tables(path: str, pages: List[int]):
table_dfs = []
for page in pages:
table_list = camelot.read_pdf(path, pages=str(page))
table_df = table_list[0].df
table_df = (
table_df.rename(columns=table_df.iloc[0])
.drop(table_df.index[0])
.reset_index(drop=True)
)
table_dfs.append(table_df)
return table_dfs
table_dfs = get_tables(file_path, pages=[3, 25])
table_dfs[0]
table_dfs[1]
llm = OpenAI(model="gpt-4")
df_query_engines = [
| PandasQueryEngine(table_df, llm=llm) | llama_index.core.query_engine.PandasQueryEngine |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
from llama_index.core import set_global_handler
set_global_handler("wandb", run_args={"project": "llamaindex"})
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.llms.openai import OpenAI
from llama_index.core.schema import MetadataMode
llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo", max_tokens=512)
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.core.extractors import (
SummaryExtractor,
QuestionsAnsweredExtractor,
)
node_parser = TokenTextSplitter(
separator=" ", chunk_size=256, chunk_overlap=128
)
extractors_1 = [
QuestionsAnsweredExtractor(
questions=3, llm=llm, metadata_mode=MetadataMode.EMBED
),
]
extractors_2 = [
SummaryExtractor(summaries=["prev", "self", "next"], llm=llm),
QuestionsAnsweredExtractor(
questions=3, llm=llm, metadata_mode=MetadataMode.EMBED
),
]
from llama_index.core import SimpleDirectoryReader
from llama_index.readers.web import SimpleWebPageReader
reader = | SimpleWebPageReader(html_to_text=True) | llama_index.readers.web.SimpleWebPageReader |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().system('pip install llama-index qdrant-client pypdf "transformers[torch]"')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
get_ipython().system("mkdir -p 'data/'")
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/").load_data()
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.core import Settings
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
client = QdrantClient(path="./qdrant_data")
vector_store = QdrantVectorStore(
"llama2_paper", client=client, enable_hybrid=True, batch_size=20
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
Settings.chunk_size = 512
index = VectorStoreIndex.from_documents(
documents,
storage_context=storage_context,
)
query_engine = index.as_query_engine(
similarity_top_k=2, sparse_top_k=12, vector_store_query_mode="hybrid"
)
from IPython.display import display, Markdown
response = query_engine.query(
"How was Llama2 specifically trained differently from Llama1?"
)
display(Markdown(str(response)))
print(len(response.source_nodes))
from IPython.display import display, Markdown
query_engine = index.as_query_engine(
similarity_top_k=2,
)
response = query_engine.query(
"How was Llama2 specifically trained differently from Llama1?"
)
display(Markdown(str(response)))
import nest_asyncio
nest_asyncio.apply()
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.core import Settings
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import AsyncQdrantClient
aclient = AsyncQdrantClient(path="./qdrant_data_async")
vector_store = QdrantVectorStore(
collection_name="llama2_paper",
aclient=aclient,
enable_hybrid=True,
batch_size=20,
)
storage_context = | StorageContext.from_defaults(vector_store=vector_store) | llama_index.core.StorageContext.from_defaults |
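# A hedged sketch of the async path: build the index with use_async=True and
# query it with aquery (nest_asyncio above makes this workable in a notebook).
index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
    use_async=True,
)
query_engine = index.as_query_engine(
    similarity_top_k=2, sparse_top_k=12, vector_store_query_mode="hybrid"
)
response = await query_engine.aquery(
    "How was Llama2 specifically trained differently from Llama1?"
)
display(Markdown(str(response)))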
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')
import pandas as pd
from pathlib import Path
data_dir = Path("./WikiTableQuestions/csv/200-csv")
csv_files = sorted([f for f in data_dir.glob("*.csv")])
dfs = []
for csv_file in csv_files:
print(f"processing file: {csv_file}")
try:
df = pd.read_csv(csv_file)
dfs.append(df)
except Exception as e:
print(f"Error parsing {csv_file}: {str(e)}")
tableinfo_dir = "WikiTableQuestions_TableInfo"
get_ipython().system('mkdir {tableinfo_dir}')
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.llms.openai import OpenAI
class TableInfo(BaseModel):
"""Information regarding a structured table."""
table_name: str = Field(
..., description="table name (must be underscores and NO spaces)"
)
table_summary: str = Field(
..., description="short, concise summary/caption of the table"
)
prompt_str = """\
Give me a summary of the table with the following JSON format.
- The table name must be unique to the table and describe it while being concise.
- Do NOT output a generic table name (e.g. table, my_table).
Do NOT make the table name one of the following: {exclude_table_name_list}
Table:
{table_str}
Summary: """
program = LLMTextCompletionProgram.from_defaults(
output_cls=TableInfo,
llm=OpenAI(model="gpt-3.5-turbo"),
prompt_template_str=prompt_str,
)
import json
from typing import Optional
def _get_tableinfo_with_index(idx: int) -> Optional[TableInfo]:
results_gen = Path(tableinfo_dir).glob(f"{idx}_*")
results_list = list(results_gen)
if len(results_list) == 0:
return None
elif len(results_list) == 1:
path = results_list[0]
return TableInfo.parse_file(path)
else:
raise ValueError(
f"More than one file matching index: {list(results_gen)}"
)
table_names = set()
table_infos = []
for idx, df in enumerate(dfs):
table_info = _get_tableinfo_with_index(idx)
if table_info:
table_infos.append(table_info)
else:
while True:
df_str = df.head(10).to_csv()
table_info = program(
table_str=df_str,
exclude_table_name_list=str(list(table_names)),
)
table_name = table_info.table_name
print(f"Processed table: {table_name}")
if table_name not in table_names:
table_names.add(table_name)
break
else:
print(f"Table name {table_name} already exists, trying again.")
pass
out_file = f"{tableinfo_dir}/{idx}_{table_name}.json"
json.dump(table_info.dict(), open(out_file, "w"))
table_infos.append(table_info)
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
)
import re
def sanitize_column_name(col_name):
return re.sub(r"\W+", "_", col_name)
def create_table_from_dataframe(
df: pd.DataFrame, table_name: str, engine, metadata_obj
):
sanitized_columns = {col: sanitize_column_name(col) for col in df.columns}
df = df.rename(columns=sanitized_columns)
columns = [
Column(col, String if dtype == "object" else Integer)
for col, dtype in zip(df.columns, df.dtypes)
]
table = Table(table_name, metadata_obj, *columns)
metadata_obj.create_all(engine)
with engine.connect() as conn:
for _, row in df.iterrows():
insert_stmt = table.insert().values(**row.to_dict())
conn.execute(insert_stmt)
conn.commit()
engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
for idx, df in enumerate(dfs):
tableinfo = _get_tableinfo_with_index(idx)
print(f"Creating table: {tableinfo.table_name}")
create_table_from_dataframe(df, tableinfo.table_name, engine, metadata_obj)
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.objects import (
SQLTableNodeMapping,
ObjectIndex,
SQLTableSchema,
)
from llama_index.core import SQLDatabase, VectorStoreIndex
sql_database = SQLDatabase(engine)
table_node_mapping = SQLTableNodeMapping(sql_database)
table_schema_objs = [
SQLTableSchema(table_name=t.table_name, context_str=t.table_summary)
for t in table_infos
] # add a SQLTableSchema for each table
obj_index = ObjectIndex.from_objects(
table_schema_objs,
table_node_mapping,
VectorStoreIndex,
)
obj_retriever = obj_index.as_retriever(similarity_top_k=3)
from llama_index.core.retrievers import SQLRetriever
from typing import List
from llama_index.core.query_pipeline import FnComponent
sql_retriever = SQLRetriever(sql_database)
def get_table_context_str(table_schema_objs: List[SQLTableSchema]):
"""Get table context string."""
context_strs = []
for table_schema_obj in table_schema_objs:
table_info = sql_database.get_single_table_info(
table_schema_obj.table_name
)
if table_schema_obj.context_str:
table_opt_context = " The table description is: "
table_opt_context += table_schema_obj.context_str
table_info += table_opt_context
context_strs.append(table_info)
return "\n\n".join(context_strs)
table_parser_component = FnComponent(fn=get_table_context_str)
from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_TO_SQL_PROMPT
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import FnComponent
from llama_index.core.llms import ChatResponse
def parse_response_to_sql(response: ChatResponse) -> str:
"""Parse response to SQL."""
response = response.message.content
sql_query_start = response.find("SQLQuery:")
if sql_query_start != -1:
response = response[sql_query_start:]
if response.startswith("SQLQuery:"):
response = response[len("SQLQuery:") :]
sql_result_start = response.find("SQLResult:")
if sql_result_start != -1:
response = response[:sql_result_start]
return response.strip().strip("```").strip()
sql_parser_component = FnComponent(fn=parse_response_to_sql)
text2sql_prompt = DEFAULT_TEXT_TO_SQL_PROMPT.partial_format(
dialect=engine.dialect.name
)
print(text2sql_prompt.template)
response_synthesis_prompt_str = (
"Given an input question, synthesize a response from the query results.\n"
"Query: {query_str}\n"
"SQL: {sql_query}\n"
"SQL Response: {context_str}\n"
"Response: "
)
response_synthesis_prompt = PromptTemplate(
response_synthesis_prompt_str,
)
llm = OpenAI(model="gpt-3.5-turbo")
from llama_index.core.query_pipeline import (
QueryPipeline as QP,
Link,
InputComponent,
CustomQueryComponent,
)
qp = QP(
modules={
"input": InputComponent(),
"table_retriever": obj_retriever,
"table_output_parser": table_parser_component,
"text2sql_prompt": text2sql_prompt,
"text2sql_llm": llm,
"sql_output_parser": sql_parser_component,
"sql_retriever": sql_retriever,
"response_synthesis_prompt": response_synthesis_prompt,
"response_synthesis_llm": llm,
},
verbose=True,
)
qp.add_chain(["input", "table_retriever", "table_output_parser"])
qp.add_link("input", "text2sql_prompt", dest_key="query_str")
qp.add_link("table_output_parser", "text2sql_prompt", dest_key="schema")
qp.add_chain(
["text2sql_prompt", "text2sql_llm", "sql_output_parser", "sql_retriever"]
)
qp.add_link(
"sql_output_parser", "response_synthesis_prompt", dest_key="sql_query"
)
qp.add_link(
"sql_retriever", "response_synthesis_prompt", dest_key="context_str"
)
qp.add_link("input", "response_synthesis_prompt", dest_key="query_str")
qp.add_link("response_synthesis_prompt", "response_synthesis_llm")
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.dag)
net.show("text2sql_dag.html")
response = qp.run(
query="What was the year that The Notorious B.I.G was signed to Bad Boy?"
)
print(str(response))
response = qp.run(query="Who won best director in the 1972 academy awards")
print(str(response))
response = qp.run(query="What was the term of Pasquale Preziosa?")
print(str(response))
from llama_index.core import VectorStoreIndex, load_index_from_storage
from sqlalchemy import text
from llama_index.core.schema import TextNode
from llama_index.core import StorageContext
import os
from pathlib import Path
from typing import Dict
def index_all_tables(
sql_database: SQLDatabase, table_index_dir: str = "table_index_dir"
) -> Dict[str, VectorStoreIndex]:
"""Index all tables."""
if not Path(table_index_dir).exists():
os.makedirs(table_index_dir)
vector_index_dict = {}
engine = sql_database.engine
for table_name in sql_database.get_usable_table_names():
print(f"Indexing rows in table: {table_name}")
if not os.path.exists(f"{table_index_dir}/{table_name}"):
with engine.connect() as conn:
cursor = conn.execute(text(f'SELECT * FROM "{table_name}"'))
result = cursor.fetchall()
row_tups = []
for row in result:
row_tups.append(tuple(row))
nodes = [TextNode(text=str(t)) for t in row_tups]
index = VectorStoreIndex(nodes)
index.set_index_id("vector_index")
index.storage_context.persist(f"{table_index_dir}/{table_name}")
else:
storage_context = StorageContext.from_defaults(
persist_dir=f"{table_index_dir}/{table_name}"
)
index = load_index_from_storage(
storage_context, index_id="vector_index"
)
vector_index_dict[table_name] = index
return vector_index_dict
vector_index_dict = index_all_tables(sql_database)
test_retriever = vector_index_dict["Bad_Boy_Artists"].as_retriever(
similarity_top_k=1
)
nodes = test_retriever.retrieve("P. Diddy")
print(nodes[0].get_content())
from llama_index.core.retrievers import SQLRetriever
from typing import List
from llama_index.core.query_pipeline import FnComponent
sql_retriever = SQLRetriever(sql_database)
def get_table_context_and_rows_str(
query_str: str, table_schema_objs: List[SQLTableSchema]
):
"""Get table context string."""
context_strs = []
for table_schema_obj in table_schema_objs:
table_info = sql_database.get_single_table_info(
table_schema_obj.table_name
)
if table_schema_obj.context_str:
table_opt_context = " The table description is: "
table_opt_context += table_schema_obj.context_str
table_info += table_opt_context
vector_retriever = vector_index_dict[
table_schema_obj.table_name
].as_retriever(similarity_top_k=2)
relevant_nodes = vector_retriever.retrieve(query_str)
if len(relevant_nodes) > 0:
table_row_context = "\nHere are some relevant example rows (values in the same order as columns above)\n"
for node in relevant_nodes:
table_row_context += str(node.get_content()) + "\n"
table_info += table_row_context
context_strs.append(table_info)
return "\n\n".join(context_strs)
table_parser_component = | FnComponent(fn=get_table_context_and_rows_str) | llama_index.core.query_pipeline.FnComponent |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import os
import getpass
import openai
openai.api_key = "sk-"
import chromadb
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection("quickstart")
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore
from IPython.display import Markdown, display
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"author": "Harper Lee",
"theme": "Mafia",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
from llama_index.core import StorageContext
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.vector_stores import (
MetadataFilter,
MetadataFilters,
FilterOperator,
)
filters = MetadataFilters(
filters=[
MetadataFilter(key="theme", operator=FilterOperator.EQ, value="Mafia"),
]
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("What is inception about?")
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
filters = MetadataFilters(
filters=[
| MetadataFilter(key="theme", value="Mafia") | llama_index.core.vector_stores.MetadataFilter |
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-azure-openai')
get_ipython().system('pip install openai')
import os
os.environ["AZURE_OPENAI_API_KEY"] = "<your-api-key>"
os.environ[
"AZURE_OPENAI_ENDPOINT"
] = "https://<your-resource-name>.openai.azure.com/"
os.environ["OPENAI_API_VERSION"] = "2023-12-01-preview"
from llama_index.multi_modal_llms.azure_openai import AzureOpenAIMultiModal
azure_openai_mm_llm = AzureOpenAIMultiModal(
engine="gpt-4-vision-preview",
api_version="2023-12-01-preview",
model="gpt-4-vision-preview",
max_new_tokens=300,
)
azure_openai_mm_llm = AzureOpenAIMultiModal(
azure_endpoint="https://<your-endpoint>.openai.azure.com",
engine="gpt-4-vision-preview",
api_version="2023-12-01-preview",
model="gpt-4-vision-preview",
max_new_tokens=300,
)
import base64
import requests
from llama_index.core.schema import ImageDocument
image_url = "https://www.visualcapitalist.com/wp-content/uploads/2023/10/US_Mortgage_Rate_Surge-Sept-11-1.jpg"
response = requests.get(image_url)
if response.status_code != 200:
raise ValueError("Error: Could not retrieve image from URL.")
base64str = base64.b64encode(response.content).decode("utf-8")
image_document = | ImageDocument(image=base64str, image_mimetype="image/jpeg") | llama_index.core.schema.ImageDocument |
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools.graphql.base import GraphQLToolSpec
url = "https://spacex-production.up.railway.app/"
headers = {
"content-type": "application/json",
}
graphql_spec = | GraphQLToolSpec(url=url, headers=headers) | llama_index.tools.graphql.base.GraphQLToolSpec |
get_ipython().run_line_magic('pip', 'install llama-index-readers-dashvector')
get_ipython().system('pip install llama-index')
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
api_key = os.environ["DASHVECTOR_API_KEY"]
from llama_index.readers.dashvector import DashVectorReader
reader = | DashVectorReader(api_key=api_key) | llama_index.readers.dashvector.DashVectorReader |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-replicate')
get_ipython().run_line_magic('pip', 'install unstructured replicate')
get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm')
get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git')
get_ipython().run_line_magic('pip', 'install torch torchvision')
get_ipython().run_line_magic('pip', 'install matplotlib scikit-image')
get_ipython().run_line_magic('pip', 'install -U qdrant_client')
import os
REPLICATE_API_TOKEN = "..." # Your Relicate API token here
os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1UU0xc3uLXs-WG0aDQSXjGacUkp142rLS" -O texas.jpg')
from llama_index.readers.file import FlatReader
from pathlib import Path
from llama_index.core.node_parser import UnstructuredElementNodeParser
reader = FlatReader()
docs_2021 = reader.load_data(Path("tesla_2021_10k.htm"))
node_parser = | UnstructuredElementNodeParser() | llama_index.core.node_parser.UnstructuredElementNodeParser |
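# A plausible next step (sketch, assuming the standard element-node-parser API):
# extract raw nodes, then split them into base nodes and table mappings.
raw_nodes_2021 = node_parser.get_nodes_from_documents(docs_2021)
base_nodes_2021, node_mappings_2021 = node_parser.get_base_nodes_and_mappings(
    raw_nodes_2021
)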
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-program-openai')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.program.openai import OpenAIPydanticProgram
from pydantic import BaseModel
from llama_index.llms.openai import OpenAI
from llama_index.finetuning.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from typing import List
class Song(BaseModel):
"""Data model for a song."""
title: str
length_seconds: int
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
songs: List[Song]
finetuning_handler = OpenAIFineTuningHandler()
callback_manager = CallbackManager([finetuning_handler])
llm = OpenAI(model="gpt-4", callback_manager=callback_manager)
prompt_template_str = """\
Generate an example album, with an artist and a list of songs, \
using the movie {movie_name} as inspiration.\
"""
program = OpenAIPydanticProgram.from_defaults(
output_cls=Album,
prompt_template_str=prompt_template_str,
llm=llm,
verbose=False,
)
movie_names = [
"The Shining",
"The Departed",
"Titanic",
"Goodfellas",
"Pretty Woman",
"Home Alone",
"Caged Fury",
"Edward Scissorhands",
"Total Recall",
"Ghost",
"Tremors",
"RoboCop",
"Rocky V",
]
from tqdm.notebook import tqdm
for movie_name in tqdm(movie_names):
output = program(movie_name=movie_name)
print(output.json())
finetuning_handler.save_finetuning_events("mock_finetune_songs.jsonl")
get_ipython().system('cat mock_finetune_songs.jsonl')
from llama_index.finetuning import OpenAIFinetuneEngine
finetune_engine = OpenAIFinetuneEngine(
"gpt-3.5-turbo",
"mock_finetune_songs.jsonl",
validate_json=False, # openai validate json code doesn't support function calling yet
)
finetune_engine.finetune()
finetune_engine.get_current_job()
ft_llm = finetune_engine.get_finetuned_model(temperature=0.3)
ft_program = OpenAIPydanticProgram.from_defaults(
output_cls=Album,
prompt_template_str=prompt_template_str,
llm=ft_llm,
verbose=False,
)
ft_program(movie_name="Goodfellas")
get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pydantic import Field
from typing import List
class Citation(BaseModel):
"""Citation class."""
author: str = Field(
..., description="Inferred first author (usually last name"
)
year: int = Field(..., description="Inferred year")
desc: str = Field(
...,
description=(
"Inferred description from the text of the work that the author is"
" cited for"
),
)
class Response(BaseModel):
"""List of author citations.
Extracted over unstructured text.
"""
citations: List[Citation] = Field(
...,
description=(
"List of author citations (organized by author, year, and"
" description)."
),
)
from llama_index.readers.file import PyMuPDFReader
from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter
from pathlib import Path
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
metadata = {
"paper_title": "Llama 2: Open Foundation and Fine-Tuned Chat Models"
}
docs = [Document(text=doc_text, metadata=metadata)]
chunk_size = 1024
node_parser = | SentenceSplitter(chunk_size=chunk_size) | llama_index.core.node_parser.SentenceSplitter |
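# A minimal sketch of how the splitter is typically applied next:
nodes = node_parser.get_nodes_from_documents(docs)
print(f"Parsed {len(nodes)} nodes with chunk_size={chunk_size}")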
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-4")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston"]
from pathlib import Path
import requests
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
data_path = Path("data")
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
city_docs = {}
for wiki_title in wiki_titles:
city_docs[wiki_title] = SimpleDirectoryReader(
input_files=[f"data/{wiki_title}.txt"]
).load_data()
from llama_index.core import VectorStoreIndex
from llama_index.agent.openai import OpenAIAgent
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core import VectorStoreIndex
tool_dict = {}
for wiki_title in wiki_titles:
vector_index = VectorStoreIndex.from_documents(
city_docs[wiki_title],
)
vector_query_engine = vector_index.as_query_engine()  # uses Settings.llm
vector_tool = QueryEngineTool(
query_engine=vector_query_engine,
metadata=ToolMetadata(
name=wiki_title,
description=("Useful for questions related to" f" {wiki_title}"),
),
)
tool_dict[wiki_title] = vector_tool
from llama_index.core import VectorStoreIndex
from llama_index.core.objects import ObjectIndex, SimpleToolNodeMapping
tool_mapping = SimpleToolNodeMapping.from_objects(list(tool_dict.values()))
tool_index = ObjectIndex.from_objects(
list(tool_dict.values()),
tool_mapping,
VectorStoreIndex,
)
tool_retriever = tool_index.as_retriever(similarity_top_k=1)
from llama_index.core.llms import ChatMessage
from llama_index.core import ChatPromptTemplate
from typing import List
GEN_SYS_PROMPT_STR = """\
Task information is given below.
Given the task, please generate a system prompt for an OpenAI-powered bot to solve this task:
{task} \
"""
gen_sys_prompt_messages = [
ChatMessage(
role="system",
content="You are helping to build a system prompt for another bot.",
),
ChatMessage(role="user", content=GEN_SYS_PROMPT_STR),
]
GEN_SYS_PROMPT_TMPL = ChatPromptTemplate(gen_sys_prompt_messages)
agent_cache = {}
def create_system_prompt(task: str):
"""Create system prompt for another agent given an input task."""
llm = OpenAI(llm="gpt-4")
fmt_messages = GEN_SYS_PROMPT_TMPL.format_messages(task=task)
response = llm.chat(fmt_messages)
return response.message.content
def get_tools(task: str):
"""Get the set of relevant tools to use given an input task."""
subset_tools = tool_retriever.retrieve(task)
return [t.metadata.name for t in subset_tools]
def create_agent(system_prompt: str, tool_names: List[str]):
"""Create an agent given a system prompt and an input set of tools."""
llm = OpenAI(model="gpt-4")
try:
input_tools = [tool_dict[tn] for tn in tool_names]
agent = | OpenAIAgent.from_tools(input_tools, llm=llm, verbose=True) | llama_index.agent.openai.OpenAIAgent.from_tools |
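# A hedged sketch of wiring these helpers into a top-level "builder" agent,
# assuming create_agent is completed to cache and return the constructed agent;
# the FunctionTool wrappers below mirror the usual pattern and are assumptions.
from llama_index.core.tools import FunctionTool
system_prompt_tool = FunctionTool.from_defaults(fn=create_system_prompt)
get_tools_tool = FunctionTool.from_defaults(fn=get_tools)
create_agent_tool = FunctionTool.from_defaults(fn=create_agent)
builder_agent = OpenAIAgent.from_tools(
    [system_prompt_tool, get_tools_tool, create_agent_tool],
    llm=OpenAI(model="gpt-4"),
    verbose=True,
)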
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
get_ipython().system('pip install llama-index')
from llama_index.core import SummaryIndex
from llama_index.readers.web import SimpleWebPageReader
from IPython.display import Markdown, display
import os
documents = SimpleWebPageReader(html_to_text=True).load_data(
["http://paulgraham.com/worked.html"]
)
documents[0]
index = SummaryIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
from llama_index.readers.web import TrafilaturaWebReader
documents = TrafilaturaWebReader().load_data(
["http://paulgraham.com/worked.html"]
)
index = SummaryIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
from llama_index.core import SummaryIndex
from llama_index.readers.web import RssReader
documents = | RssReader() | llama_index.readers.web.RssReader |
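# The load call above is truncated; a minimal sketch of typical usage
# (the feed URL is illustrative):
documents = RssReader().load_data(
    ["https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml"]
)
index = SummaryIndex.from_documents(documents)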
get_ipython().run_line_magic('pip', 'install llama-index-readers-github')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index llama-hub')
import nest_asyncio
nest_asyncio.apply()
import os
os.environ["GITHUB_TOKEN"] = "ghp_..."
os.environ["OPENAI_API_KEY"] = "sk-..."
import os
from llama_index.readers.github import (
GitHubRepositoryIssuesReader,
GitHubIssuesClient,
)
github_client = | GitHubIssuesClient() | llama_index.readers.github.GitHubIssuesClient |
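# A minimal sketch of loading issues as documents (owner/repo are illustrative):
loader = GitHubRepositoryIssuesReader(
    github_client,
    owner="run-llama",
    repo="llama_index",
    verbose=True,
)
docs = loader.load_data()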
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
def multiply(a: int, b: int) -> int:
"""Multiply two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
llm = OpenAI(model="gpt-3.5-turbo-instruct")
agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)
response = agent.chat("What is 20+(2*4)? Calculate step by step ")
response_gen = agent.stream_chat("What is 20+2*4? Calculate step by step")
response_gen.print_response_stream()
llm = OpenAI(model="gpt-4")
agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)
response = agent.chat("What is 2+2*4")
print(response)
llm = OpenAI(model="gpt-4")
agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)
prompt_dict = agent.get_prompts()
for k, v in prompt_dict.items():
print(f"Prompt: {k}\n\nValue: {v.template}")
from llama_index.core import PromptTemplate
react_system_header_str = """\
You are designed to help with a variety of tasks, from answering questions \
to providing summaries to other types of analyses.
You have access to a wide variety of tools. You are responsible for using
the tools in any sequence you deem appropriate to complete the task at hand.
This may require breaking the task into subtasks and using different tools
to complete each subtask.
You have access to the following tools:
{tool_desc}
To answer the question, please use the following format.
```
Thought: I need to use a tool to help me answer the question.
Action: tool name (one of {tool_names}) if using a tool.
Action Input: the input to the tool, in a JSON format representing the kwargs (e.g. {{"input": "hello world", "num_beams": 5}})
```
Please ALWAYS start with a Thought.
Please use a valid JSON format for the Action Input. Do NOT do this {{'input': 'hello world', 'num_beams': 5}}.
If this format is used, the user will respond in the following format:
```
Observation: tool response
```
You should keep repeating the above format until you have enough information
to answer the question without using any more tools. At that point, you MUST respond
in one of the following two formats:
```
Thought: I can answer without using any more tools.
Answer: [your answer here]
```
```
Thought: I cannot answer the question with the provided tools.
Answer: Sorry, I cannot answer your query.
```
- The answer MUST contain a sequence of bullet points that explain how you arrived at the answer. This can include aspects of the previous conversation history.
- You MUST obey the function signature of each tool. Do NOT pass in empty arguments if the function expects arguments.
Below is the current conversation consisting of interleaving human and assistant messages.
"""
react_system_prompt = | PromptTemplate(react_system_header_str) | llama_index.core.PromptTemplate |
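# A hedged follow-up: update the agent's system prompt with the custom template.
# The prompt key mirrors the agent-worker convention and is an assumption here.
agent.update_prompts({"agent_worker:system_prompt": react_system_prompt})
agent.reset()
response = agent.chat("What is 5+3+2")
print(response)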
get_ipython().system('pip install llama-index-multi-modal-llms-anthropic')
get_ipython().system('pip install llama-index-vector-stores-qdrant')
get_ipython().system('pip install matplotlib')
import os
os.environ["ANTHROPIC_API_KEY"] = "" # Your ANTHROPIC API key here
from PIL import Image
import matplotlib.pyplot as plt
img = Image.open("../data/images/prometheus_paper_card.png")
plt.imshow(img)
from llama_index.core import SimpleDirectoryReader
from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal
image_documents = SimpleDirectoryReader(
input_files=["../data/images/prometheus_paper_card.png"]
).load_data()
anthropic_mm_llm = AnthropicMultiModal(max_tokens=300)
response = anthropic_mm_llm.complete(
prompt="Describe the images as an alternative text",
image_documents=image_documents,
)
print(response)
from PIL import Image
import requests
from io import BytesIO
import matplotlib.pyplot as plt
from llama_index.core.multi_modal_llms.generic_utils import load_image_urls
image_urls = [
"https://venturebeat.com/wp-content/uploads/2024/03/Screenshot-2024-03-04-at-12.49.41%E2%80%AFAM.png",
]
img_response = requests.get(image_urls[0])
img = Image.open(BytesIO(img_response.content))
plt.imshow(img)
image_url_documents = load_image_urls(image_urls)
response = anthropic_mm_llm.complete(
prompt="Describe the images as an alternative text",
image_documents=image_url_documents,
)
print(response)
from llama_index.core import SimpleDirectoryReader
image_documents = SimpleDirectoryReader(
input_files=["../data/images/ark_email_sample.PNG"]
).load_data()
from PIL import Image
import matplotlib.pyplot as plt
img = Image.open("../data/images/ark_email_sample.PNG")
plt.imshow(img)
from pydantic import BaseModel
from typing import List
class TickerInfo(BaseModel):
"""List of ticker info."""
direction: str
ticker: str
company: str
shares_traded: int
percent_of_total_etf: float
class TickerList(BaseModel):
"""List of stock tickers."""
fund: str
tickers: List[TickerInfo]
from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal
from llama_index.core.program import MultiModalLLMCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
prompt_template_str = """\
Can you get the stock information in the image \
and return the answer? Pick just one fund.
Make sure the answer is in a JSON format corresponding to a Pydantic schema. The Pydantic schema is given below.
"""
anthropic_mm_llm = | AnthropicMultiModal(max_tokens=300) | llama_index.multi_modal_llms.anthropic.AnthropicMultiModal |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
from llama_index.llms.openai import OpenAI
from llama_index.core.query_engine import CitationQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage,
)
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = | OpenAIEmbedding(model="text-embedding-3-small") | llama_index.embeddings.openai.OpenAIEmbedding |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere')
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().system('pip install "google-generativeai" -q')
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.llama_dataset import download_llama_dataset
pairwise_evaluator_dataset, _ = download_llama_dataset(
"MtBenchHumanJudgementDataset", "./mt_bench_data"
)
pairwise_evaluator_dataset.to_pandas()[:5]
from llama_index.core.evaluation import PairwiseComparisonEvaluator
from llama_index.llms.openai import OpenAI
from llama_index.llms.gemini import Gemini
from llama_index.llms.cohere import Cohere
llm_gpt4 = OpenAI(temperature=0, model="gpt-4")
llm_gpt35 = OpenAI(temperature=0, model="gpt-3.5-turbo")
llm_gemini = Gemini(model="models/gemini-pro", temperature=0)
evaluators = {
"gpt-4": | PairwiseComparisonEvaluator(llm=llm_gpt4) | llama_index.core.evaluation.PairwiseComparisonEvaluator |
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-replicate')
import os
OPENAI_API_TOKEN = "sk-<your-openai-api-token>"
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
REPLICATE_API_TOKEN = "" # Your Relicate API token here
os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN
from pathlib import Path
input_image_path = Path("restaurant_images")
if not input_image_path.exists():
Path.mkdir(input_image_path)
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png')
from pydantic import BaseModel
class Restaurant(BaseModel):
"""Data model for an restaurant."""
restaurant: str
food: str
discount: str
price: str
rating: str
review: str
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core import SimpleDirectoryReader
image_documents = SimpleDirectoryReader("./restaurant_images").load_data()
openai_mm_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=1000
)
from PIL import Image
import matplotlib.pyplot as plt
imageUrl = "./restaurant_images/fried_chicken.png"
image = Image.open(imageUrl).convert("RGB")
plt.figure(figsize=(16, 5))
plt.imshow(image)
from llama_index.core.program import MultiModalLLMCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
prompt_template_str = """\
Can you summarize what is in the image \
and return the answer in JSON format? \
"""
openai_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Restaurant),
image_documents=image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=openai_mm_llm,
verbose=True,
)
response = openai_program()
for res in response:
print(res)
from llama_index.multi_modal_llms.replicate import ReplicateMultiModal
from llama_index.multi_modal_llms.replicate.base import (
REPLICATE_MULTI_MODAL_LLM_MODELS,
)
prompt_template_str = """\
Can you summarize what is in the image \
and return the answer in JSON format? \
"""
def pydantic_replicate(
model_name, output_class, image_documents, prompt_template_str
):
mm_llm = ReplicateMultiModal(
model=REPLICATE_MULTI_MODAL_LLM_MODELS[model_name],
temperature=0.1,
max_new_tokens=1000,
)
llm_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(output_class),
image_documents=image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=mm_llm,
verbose=True,
)
response = llm_program()
print(f"Model: {model_name}")
for res in response:
print(res)
pydantic_replicate("fuyu-8b", Restaurant, image_documents, prompt_template_str)
pydantic_replicate(
"llava-13b", Restaurant, image_documents, prompt_template_str
)
pydantic_replicate(
"minigpt-4", Restaurant, image_documents, prompt_template_str
)
pydantic_replicate("cogvlm", Restaurant, image_documents, prompt_template_str)
input_image_path = Path("amazon_images")
if not input_image_path.exists():
Path.mkdir(input_image_path)
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1p1Y1qAoM68eC4sAvvHaiJyPhdUZS0Gqb" -O ./amazon_images/amazon.png')
from pydantic import BaseModel
class Product(BaseModel):
"""Data model for a Amazon Product."""
title: str
category: str
discount: str
price: str
rating: str
review: str
description: str
inventory: str
imageUrl = "./amazon_images/amazon.png"
image = Image.open(imageUrl).convert("RGB")
plt.figure(figsize=(16, 5))
plt.imshow(image)
amazon_image_documents = SimpleDirectoryReader("./amazon_images").load_data()
prompt_template_str = """\
Can you summarize what is in the image \
and return the answer in JSON format? \
"""
openai_program_amazon = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Product),
image_documents=amazon_image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=openai_mm_llm,
verbose=True,
)
response = openai_program_amazon()
for res in response:
print(res)
pydantic_replicate(
"fuyu-8b", Product, amazon_image_documents, prompt_template_str
)
pydantic_replicate(
"minigpt-4", Product, amazon_image_documents, prompt_template_str
)
pydantic_replicate(
"cogvlm", Product, amazon_image_documents, prompt_template_str
)
pydantic_replicate(
"llava-13b", Product, amazon_image_documents, prompt_template_str
)
input_image_path = Path("instagram_images")
if not input_image_path.exists():
Path.mkdir(input_image_path)
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=12ZpBBFkYu-jzz1iz356U5kMikn4uN9ww" -O ./instagram_images/jordan.png')
from pydantic import BaseModel
class InsAds(BaseModel):
"""Data model for a Ins Ads."""
account: str
brand: str
product: str
category: str
discount: str
price: str
comments: str
review: str
description: str
from PIL import Image
import matplotlib.pyplot as plt
imageUrl = "./instagram_images/jordan.png"
image = Image.open(imageUrl).convert("RGB")
plt.figure(figsize=(16, 5))
plt.imshow(image)
ins_image_documents = SimpleDirectoryReader("./instagram_images").load_data()
prompt_template_str = """\
Can you summarize what is in the image \
and return the answer in JSON format? \
"""
openai_program_ins = MultiModalLLMCompletionProgram.from_defaults(
output_parser= | PydanticOutputParser(InsAds) | llama_index.core.output_parsers.PydanticOutputParser |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
llm = OpenAI(model="gpt-4")
splitter = SentenceSplitter(chunk_size=1024)
nodes = splitter.get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(
nodes=nodes,
storage_context=storage_context,
)
retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
from llama_index.core.response.notebook_utils import display_source_node
nodes = retriever.retrieve("What happened at Viaweb and Interleaf?")
for node in nodes:
display_source_node(node)
nodes = retriever.retrieve("What did Paul Graham do after RISD?")
for node in nodes:
display_source_node(node)
from llama_index.core.tools import RetrieverTool
vector_retriever = VectorIndexRetriever(index)
bm25_retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
retriever_tools = [
RetrieverTool.from_defaults(
retriever=vector_retriever,
description="Useful in most cases",
),
RetrieverTool.from_defaults(
retriever=bm25_retriever,
description="Useful if searching about specific information",
),
]
from llama_index.core.retrievers import RouterRetriever
retriever = RouterRetriever.from_defaults(
retriever_tools=retriever_tools,
llm=llm,
select_multi=True,
)
retrieved_nodes = retriever.retrieve(
    "Can you give me all the context regarding the author's life?"
)
for node in retrieved_nodes:
    display_source_node(node)
get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf')
from llama_index.core import (
VectorStoreIndex,
StorageContext,
SimpleDirectoryReader,
Document,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
documents = SimpleDirectoryReader(
input_files=["IPCC_AR6_WGII_Chapter03.pdf"]
).load_data()
llm = OpenAI(model="gpt-3.5-turbo")
splitter = SentenceSplitter(chunk_size=256)
nodes = splitter.get_nodes_from_documents(
[Document(text=documents[0].get_content()[:1000000])]
)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.retrievers.bm25 import BM25Retriever
vector_retriever = index.as_retriever(similarity_top_k=10)
bm25_retriever = | BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=10) | llama_index.retrievers.bm25.BM25Retriever.from_defaults |
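# A hedged sketch of a plausible next step: fuse the two retrievers with
# reciprocal-rank fusion via QueryFusionRetriever.
from llama_index.core.retrievers import QueryFusionRetriever
retriever = QueryFusionRetriever(
    [vector_retriever, bm25_retriever],
    similarity_top_k=10,
    num_queries=4,  # generates extra queries; set to 1 to disable
    mode="reciprocal_rerank",
    use_async=True,
)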
get_ipython().run_line_magic('pip', 'install llama-index-evaluation-tonic-validate')
import json
import pandas as pd
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.evaluation.tonic_validate import (
AnswerConsistencyEvaluator,
AnswerSimilarityEvaluator,
AugmentationAccuracyEvaluator,
AugmentationPrecisionEvaluator,
RetrievalPrecisionEvaluator,
TonicValidateEvaluator,
)
question = "What makes Sam Altman a good founder?"
reference_answer = "He is smart and has a great force of will."
llm_answer = "He is a good founder because he is smart."
retrieved_context_list = [
"Sam Altman is a good founder. He is very smart.",
"What makes Sam Altman such a good founder is his great force of will.",
]
answer_similarity_evaluator = AnswerSimilarityEvaluator()
score = await answer_similarity_evaluator.aevaluate(
question,
llm_answer,
retrieved_context_list,
reference_response=reference_answer,
)
score
answer_consistency_evaluator = | AnswerConsistencyEvaluator() | llama_index.evaluation.tonic_validate.AnswerConsistencyEvaluator |
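# A minimal sketch mirroring the similarity call above; the consistency metric
# needs no reference answer.
score = await answer_consistency_evaluator.aevaluate(
    question, llm_answer, retrieved_context_list
)
score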
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
splitter = SentenceSplitter(chunk_size=256)
index = VectorStoreIndex.from_documents(
documents, transformations=[splitter], show_progress=True
)
from llama_index.retrievers.bm25 import BM25Retriever
vector_retriever = index.as_retriever(similarity_top_k=5)
bm25_retriever = BM25Retriever.from_defaults(
docstore=index.docstore, similarity_top_k=10
)
from llama_index.core.retrievers import QueryFusionRetriever
retriever = QueryFusionRetriever(
[vector_retriever, bm25_retriever],
retriever_weights=[0.6, 0.4],
similarity_top_k=10,
num_queries=1, # set this to 1 to disable query generation
mode="relative_score",
use_async=True,
verbose=True,
)
import nest_asyncio
nest_asyncio.apply()
nodes_with_scores = retriever.retrieve(
    "What happened at Interleaf and Viaweb?"
)
for node in nodes_with_scores:
print(f"Score: {node.score:.2f} - {node.text[:100]}...\n-----")
from llama_index.core.retrievers import QueryFusionRetriever
retriever = QueryFusionRetriever(
[vector_retriever, bm25_retriever],
retriever_weights=[0.6, 0.4],
similarity_top_k=10,
num_queries=1, # set this to 1 to disable query generation
mode="dist_based_score",
use_async=True,
verbose=True,
)
nodes_with_scores = retriever.retrieve(
    "What happened at Interleaf and Viaweb?"
)
for node in nodes_with_scores:
print(f"Score: {node.score:.2f} - {node.text[:100]}...\n-----")
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine.from_args(retriever)
response = query_engine.query("What happened at Interleafe and Viaweb?")
from llama_index.core.response.notebook_utils import display_response
| display_response(response) | llama_index.core.response.notebook_utils.display_response |
from llama_hub.openalex import OpenAlexReader
from llama_index.llms import OpenAI
from llama_index.query_engine import CitationQueryEngine
from llama_index import (
VectorStoreIndex,
ServiceContext,
)
from llama_index.response.notebook_utils import display_response
openalex_reader = OpenAlexReader(email="[email protected]")
query = "biases in large language models"
works = openalex_reader.load_data(query, full_text=False)
service_context = ServiceContext.from_defaults(
llm= | OpenAI(model="gpt-3.5-turbo", temperature=0) | llama_index.llms.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
import os
os.environ["OPENAI_API_KEY"] = "INSERT OPENAI KEY"
get_ipython().system('pip install llama-index')
from llama_index.core import download_loader
from llama_index.readers.wikipedia import WikipediaReader
loader = WikipediaReader()
documents = loader.load_data(pages=["Berlin"])
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents)
import time
from llama_index.core import VectorStoreIndex
from llama_index.core.postprocessor import SentenceEmbeddingOptimizer
print("Without optimization")
start_time = time.time()
query_engine = index.as_query_engine()
res = query_engine.query("What is the population of Berlin?")
end_time = time.time()
print("Total time elapsed: {}".format(end_time - start_time))
print("Answer: {}".format(res))
print("With optimization")
start_time = time.time()
query_engine = index.as_query_engine(
node_postprocessors=[ | SentenceEmbeddingOptimizer(percentile_cutoff=0.5) | llama_index.core.postprocessor.SentenceEmbeddingOptimizer |
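# A hedged alternative to the truncated call above (sketch): cut sentences by
# an absolute similarity threshold instead of a percentile.
query_engine = index.as_query_engine(
    node_postprocessors=[SentenceEmbeddingOptimizer(threshold_cutoff=0.7)]
)
res = query_engine.query("What is the population of Berlin?")
print("Answer: {}".format(res))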
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
from llama_index.core import set_global_handler
| set_global_handler("wandb", run_args={"project": "llamaindex"}) | llama_index.core.set_global_handler |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core.evaluation.benchmarks import HotpotQAEvaluator
from llama_index.core import VectorStoreIndex
from llama_index.core import Document
from llama_index.llms.openai import OpenAI
from llama_index.core.embeddings import resolve_embed_model
llm = OpenAI(model="gpt-3.5-turbo")
embed_model = resolve_embed_model(
"local:sentence-transformers/all-MiniLM-L6-v2"
)
index = VectorStoreIndex.from_documents(
[Document.example()], embed_model=embed_model, show_progress=True
)
engine = index.as_query_engine(llm=llm)
| HotpotQAEvaluator() | llama_index.core.evaluation.benchmarks.HotpotQAEvaluator |
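# A hedged usage sketch: run the benchmark over a few queries and print results;
# the run() arguments mirror the standard benchmark API.
HotpotQAEvaluator().run(engine, queries=5, show_result=True)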
get_ipython().system('pip install llama-index-multi-modal-llms-anthropic')
get_ipython().system('pip install llama-index-vector-stores-qdrant')
get_ipython().system('pip install matplotlib')
import os
os.environ["ANTHROPIC_API_KEY"] = "" # Your ANTHROPIC API key here
from PIL import Image
import matplotlib.pyplot as plt
img = Image.open("../data/images/prometheus_paper_card.png")
plt.imshow(img)
from llama_index.core import SimpleDirectoryReader
from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal
image_documents = SimpleDirectoryReader(
input_files=["../data/images/prometheus_paper_card.png"]
).load_data()
anthropic_mm_llm = AnthropicMultiModal(max_tokens=300)
response = anthropic_mm_llm.complete(
prompt="Describe the images as an alternative text",
image_documents=image_documents,
)
print(response)
from PIL import Image
import requests
from io import BytesIO
import matplotlib.pyplot as plt
from llama_index.core.multi_modal_llms.generic_utils import load_image_urls
image_urls = [
"https://venturebeat.com/wp-content/uploads/2024/03/Screenshot-2024-03-04-at-12.49.41%E2%80%AFAM.png",
]
img_response = requests.get(image_urls[0])
img = Image.open(BytesIO(img_response.content))
plt.imshow(img)
image_url_documents = load_image_urls(image_urls)
response = anthropic_mm_llm.complete(
prompt="Describe the images as an alternative text",
image_documents=image_url_documents,
)
print(response)
from llama_index.core import SimpleDirectoryReader
image_documents = SimpleDirectoryReader(
input_files=["../data/images/ark_email_sample.PNG"]
).load_data()
from PIL import Image
import matplotlib.pyplot as plt
img = Image.open("../data/images/ark_email_sample.PNG")
plt.imshow(img)
from pydantic import BaseModel
from typing import List
class TickerInfo(BaseModel):
"""List of ticker info."""
direction: str
ticker: str
company: str
shares_traded: int
percent_of_total_etf: float
class TickerList(BaseModel):
"""List of stock tickers."""
fund: str
tickers: List[TickerInfo]
from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal
from llama_index.core.program import MultiModalLLMCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
prompt_template_str = """\
Can you get the stock information in the image \
and return the answer? Pick just one fund.
Make sure the answer is in a JSON format corresponding to a Pydantic schema. The Pydantic schema is given below.
"""
anthropic_mm_llm = AnthropicMultiModal(max_tokens=300)
llm_program = MultiModalLLMCompletionProgram.from_defaults(
output_cls=TickerList,
image_documents=image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=anthropic_mm_llm,
verbose=True,
)
response = llm_program()
print(str(response))
get_ipython().system('wget "https://www.dropbox.com/scl/fi/c1ec6osn0r2ggnitijqhl/mixed_wiki_images_small.zip?rlkey=swwxc7h4qtwlnhmby5fsnderd&dl=1" -O mixed_wiki_images_small.zip')
get_ipython().system('unzip mixed_wiki_images_small.zip')
from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal
anthropic_mm_llm = | AnthropicMultiModal(max_tokens=300) | llama_index.multi_modal_llms.anthropic.AnthropicMultiModal |
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
Settings.callback_manager = callback_manager
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.tools import QueryEngineTool
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["albums", "tracks", "artists"],
verbose=True,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query"
),
)
from llama_index.core.query_pipeline import QueryPipeline as QP
qp = QP(verbose=True)
from llama_index.core.agent.react.types import (
ActionReasoningStep,
ObservationReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.agent import Task, AgentChatResponse
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
CustomAgentComponent,
QueryComponent,
ToolRunnerComponent,
)
from llama_index.core.llms import MessageRole
from typing import Dict, Any, Optional, Tuple, List, cast
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]:
"""Agent input function.
Returns:
A Dictionary of output keys and values. If you are specifying
src_key when defining links between this component and other
components, make sure the src_key matches the specified output_key.
"""
if "current_reasoning" not in state:
state["current_reasoning"] = []
reasoning_step = ObservationReasoningStep(observation=task.input)
state["current_reasoning"].append(reasoning_step)
return {"input": task.input}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core.agent import ReActChatFormatter
from llama_index.core.query_pipeline import InputComponent, Link
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool
def react_prompt_fn(
task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool]
) -> List[ChatMessage]:
chat_formatter = ReActChatFormatter()
return chat_formatter.format(
tools,
chat_history=task.memory.get() + state["memory"].get_all(),
current_reasoning=state["current_reasoning"],
)
react_prompt_component = AgentFnComponent(
fn=react_prompt_fn, partial_dict={"tools": [sql_tool]}
)
from typing import Set, Optional
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.llms import ChatResponse
from llama_index.core.agent.types import Task
def parse_react_output_fn(
task: Task, state: Dict[str, Any], chat_response: ChatResponse
):
"""Parse ReAct output into a reasoning step."""
output_parser = ReActOutputParser()
reasoning_step = output_parser.parse(chat_response.message.content)
return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step}
parse_react_output = AgentFnComponent(fn=parse_react_output_fn)
def run_tool_fn(
task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep
):
"""Run tool and process tool output."""
tool_runner_component = ToolRunnerComponent(
[sql_tool], callback_manager=task.callback_manager
)
tool_output = tool_runner_component.run_component(
tool_name=reasoning_step.action,
tool_input=reasoning_step.action_input,
)
observation_step = ObservationReasoningStep(observation=str(tool_output))
state["current_reasoning"].append(observation_step)
return {"response_str": observation_step.get_content(), "is_done": False}
run_tool = AgentFnComponent(fn=run_tool_fn)
def process_response_fn(
task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep
):
"""Process response."""
state["current_reasoning"].append(response_step)
response_str = response_step.response
state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER))
state["memory"].put(
ChatMessage(content=response_str, role=MessageRole.ASSISTANT)
)
return {"response_str": response_str, "is_done": True}
process_response = AgentFnComponent(fn=process_response_fn)
def process_agent_response_fn(
task: Task, state: Dict[str, Any], response_dict: dict
):
"""Process agent response."""
return (
AgentChatResponse(response_dict["response_str"]),
response_dict["is_done"],
)
process_agent_response = AgentFnComponent(fn=process_agent_response_fn)
from llama_index.core.query_pipeline import QueryPipeline as QP
from llama_index.llms.openai import OpenAI
qp.add_modules(
{
"agent_input": agent_input_component,
"react_prompt": react_prompt_component,
"llm": OpenAI(model="gpt-4-1106-preview"),
"react_output_parser": parse_react_output,
"run_tool": run_tool,
"process_response": process_response,
"process_agent_response": process_agent_response,
}
)
qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"])
qp.add_link(
"react_output_parser",
"run_tool",
condition_fn=lambda x: not x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link(
"react_output_parser",
"process_response",
condition_fn=lambda x: x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link("process_response", "process_agent_response")
qp.add_link("run_tool", "process_agent_response")
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.clean_dag)
net.show("agent_dag.html")
from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner
from llama_index.core.callbacks import CallbackManager
agent_worker = QueryPipelineAgentWorker(qp)
agent = AgentRunner(
agent_worker, callback_manager=CallbackManager([]), verbose=True
)
task = agent.create_task(
"What are some tracks from the artist AC/DC? Limit it to 3"
)
step_output = agent.run_step(task.task_id)
step_output = agent.run_step(task.task_id)
step_output.is_last
response = agent.finalize_response(task.task_id)
print(str(response))
agent.reset()
response = agent.chat(
"What are some tracks from the artist AC/DC? Limit it to 3"
)
print(str(response))
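# Hedged sketch (not from the original snippet): the two manual run_step calls
# above assume the task finishes in exactly two steps. A loop like this runs
# steps until the worker reports the last one.
task = agent.create_task(
    "What are some tracks from the artist AC/DC? Limit it to 3"
)
step_output = agent.run_step(task.task_id)
while not step_output.is_last:
    step_output = agent.run_step(task.task_id)
response = agent.finalize_response(task.task_id)
print(str(response))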
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-1106-preview")
from llama_index.core.agent import Task, AgentChatResponse
from typing import Dict, Any
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
)
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict:
"""Agent input function."""
if "convo_history" not in state:
state["convo_history"] = []
state["count"] = 0
state["convo_history"].append(f"User: {task.input}")
convo_history_str = "\n".join(state["convo_history"]) or "None"
return {"input": task.input, "convo_history": convo_history_str}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core import PromptTemplate
retry_prompt_str = """\
You are trying to generate a proper natural language query given a user input.
This query will then be interpreted by a downstream text-to-SQL agent which
will convert the query to a SQL statement. If the agent triggers an error,
then that will be reflected in the current conversation history (see below).
If the conversation history is None, use the user input. If it's not None,
generate a new SQL query that avoids the problems of the previous SQL query.
Input: {input}
Convo history (failed attempts):
{convo_history}
New input: """
retry_prompt = PromptTemplate(retry_prompt_str)
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools.google_calendar.base import GoogleCalendarToolSpec
tool_spec = GoogleCalendarToolSpec()
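# A minimal sketch (illustrative; the query text is arbitrary): wrap the tool
# spec in an OpenAIAgent so the calendar tools can be called conversationally.
agent = OpenAIAgent.from_tools(tool_spec.to_tool_list(), verbose=True)
print(agent.chat("What is on my calendar today?"))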
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "./llama2.pdf"')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/1706.03762.pdf" -O "./attention.pdf"')
from llama_index.core import download_loader
from llama_index.readers.file import PyMuPDFReader
llama2_docs = PyMuPDFReader().load_data(
file_path="./llama2.pdf", metadata=True
)
attention_docs = PyMuPDFReader().load_data(
file_path="./attention.pdf", metadata=True
)
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.core.node_parser import TokenTextSplitter
nodes = TokenTextSplitter(
chunk_size=1024, chunk_overlap=128
).get_nodes_from_documents(llama2_docs + attention_docs)
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.storage.docstore.firestore import FirestoreDocumentStore
from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore
docstore = SimpleDocumentStore()
docstore.add_documents(nodes)
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
client = QdrantClient(path="./qdrant_data")
vector_store = QdrantVectorStore("composable", client=client)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes=nodes, storage_context=storage_context)
vector_retriever = index.as_retriever(similarity_top_k=2)
bm25_retriever = BM25Retriever.from_defaults(
docstore=docstore, similarity_top_k=2
)
from llama_index.core.schema import IndexNode
vector_obj = IndexNode(
index_id="vector", obj=vector_retriever, text="Vector Retriever"
)
bm25_obj = IndexNode(
index_id="bm25", obj=bm25_retriever, text="BM25 Retriever"
)
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(objects=[vector_obj, bm25_obj])
query_engine = summary_index.as_query_engine(
response_mode="tree_summarize", verbose=True
)
response = await query_engine.aquery(
"How does attention work in transformers?"
)
print(str(response))
response = await query_engine.aquery(
"What is the architecture of Llama2 based on?"
)
print(str(response))
response = await query_engine.aquery(
"What was used before attention in transformers?"
)
print(str(response))
docstore.persist("./docstore.json")
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
docstore = SimpleDocumentStore.from_persist_path("./docstore.json")
client = QdrantClient(path="./qdrant_data")
vector_store = QdrantVectorStore("composable", client=client)
index = VectorStoreIndex.from_vector_store(vector_store)
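# A sketch of the reload path (top-k values are assumptions): rebuild both
# retrievers from the persisted stores, then recompose the SummaryIndex over
# them exactly as above.
vector_retriever = index.as_retriever(similarity_top_k=2)
bm25_retriever = BM25Retriever.from_defaults(
    docstore=docstore, similarity_top_k=2
)
vector_obj = IndexNode(
    index_id="vector", obj=vector_retriever, text="Vector Retriever"
)
bm25_obj = IndexNode(index_id="bm25", obj=bm25_retriever, text="BM25 Retriever")
summary_index = SummaryIndex(objects=[vector_obj, bm25_obj])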
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-kvstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
from llama_index.storage.kvstore.firestore import FirestoreKVStore
from llama_index.storage.docstore.firestore import FirestoreDocumentStore
from llama_index.storage.index_store.firestore import FirestoreIndexStore
kvstore = FirestoreKVStore()
storage_context = StorageContext.from_defaults(
    docstore=FirestoreDocumentStore(kvstore),
    index_store=FirestoreIndexStore(kvstore),
)
get_ipython().run_line_magic('pip', 'install llama-index-program-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-llama-api')
get_ipython().system('pip install llama-index')
from llama_index.llms.llama_api import LlamaAPI
api_key = "LL-your-key"
llm = LlamaAPI(api_key=api_key)
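# Quick smoke test (illustrative only): LlamaAPI supports the standard LLM
# interface, so a plain completion call should work.
resp = llm.complete("Paul Graham is ")
print(resp)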
get_ipython().run_line_magic('pip', 'install llama-index-readers-github')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index llama-hub')
import nest_asyncio
nest_asyncio.apply()
import os
os.environ["GITHUB_TOKEN"] = "ghp_..."
os.environ["OPENAI_API_KEY"] = "sk-..."
import os
from llama_index.readers.github import (
GitHubRepositoryIssuesReader,
GitHubIssuesClient,
)
github_client = GitHubIssuesClient()
loader = GitHubRepositoryIssuesReader(
github_client,
owner="run-llama",
repo="llama_index",
verbose=True,
)
orig_docs = loader.load_data()
limit = 100
docs = []
for idx, doc in enumerate(orig_docs):
doc.metadata["index_id"] = int(doc.id_)
if idx >= limit:
break
docs.append(doc)
import weaviate
auth_config = weaviate.AuthApiKey(
api_key="XRa15cDIkYRT7AkrpqT6jLfE4wropK1c1TGk"
)
client = weaviate.Client(
"https://llama-index-test-v0oggsoz.weaviate.network",
auth_client_secret=auth_config,
)
class_name = "LlamaIndex_docs"
client.schema.delete_class(class_name)
from llama_index.vector_stores.weaviate import WeaviateVectorStore
from llama_index.core import VectorStoreIndex, StorageContext
vector_store = WeaviateVectorStore(
weaviate_client=client, index_name=class_name
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
doc_index = VectorStoreIndex.from_documents(
docs, storage_context=storage_context
)
from llama_index.core import SummaryIndex
from llama_index.core.async_utils import run_jobs
from llama_index.llms.openai import OpenAI
from llama_index.core.schema import IndexNode
from llama_index.core.vector_stores import (
FilterOperator,
MetadataFilter,
MetadataFilters,
)
async def aprocess_doc(doc, include_summary: bool = True):
"""Process doc."""
metadata = doc.metadata
date_tokens = metadata["created_at"].split("T")[0].split("-")
year = int(date_tokens[0])
month = int(date_tokens[1])
day = int(date_tokens[2])
assignee = (
"" if "assignee" not in doc.metadata else doc.metadata["assignee"]
)
size = ""
if len(doc.metadata["labels"]) > 0:
size_arr = [l for l in doc.metadata["labels"] if "size:" in l]
size = size_arr[0].split(":")[1] if len(size_arr) > 0 else ""
new_metadata = {
"state": metadata["state"],
"year": year,
"month": month,
"day": day,
"assignee": assignee,
"size": size,
}
summary_index = SummaryIndex.from_documents([doc])
query_str = "Give a one-sentence concise summary of this issue."
query_engine = summary_index.as_query_engine(
llm=OpenAI(model="gpt-3.5-turbo")
)
summary_txt = await query_engine.aquery(query_str)
summary_txt = str(summary_txt)
index_id = doc.metadata["index_id"]
filters = MetadataFilters(
filters=[
MetadataFilter(
key="index_id", operator=FilterOperator.EQ, value=int(index_id)
),
]
)
index_node = IndexNode(
text=summary_txt,
metadata=new_metadata,
obj=doc_index.as_retriever(filters=filters),
index_id=doc.id_,
)
return index_node
async def aprocess_docs(docs):
"""Process metadata on docs."""
index_nodes = []
tasks = []
for doc in docs:
task = aprocess_doc(doc)
tasks.append(task)
index_nodes = await run_jobs(tasks, show_progress=True, workers=3)
return index_nodes
index_nodes = await aprocess_docs(docs)
index_nodes[5].metadata
import weaviate
auth_config = weaviate.AuthApiKey(
api_key="XRa15cDIkYRT7AkrpqT6jLfE4wropK1c1TGk"
)
client = weaviate.Client(
"https://llama-index-test-v0oggsoz.weaviate.network",
auth_client_secret=auth_config,
)
class_name = "LlamaIndex_auto"
client.schema.delete_class(class_name)
from llama_index.vector_stores.weaviate import WeaviateVectorStore
from llama_index.core import VectorStoreIndex, StorageContext
vector_store_auto = WeaviateVectorStore(
weaviate_client=client, index_name=class_name
)
storage_context_auto = StorageContext.from_defaults(
vector_store=vector_store_auto
)
index = VectorStoreIndex(
objects=index_nodes, storage_context=storage_context_auto
)
from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
vector_store_info = VectorStoreInfo(
content_info="Github Issues",
metadata_info=[
MetadataInfo(
name="state",
description="Whether the issue is `open` or `closed`",
type="string",
),
MetadataInfo(
name="year",
description="The year issue was created",
type="integer",
),
MetadataInfo(
name="month",
description="The month issue was created",
type="integer",
),
MetadataInfo(
name="day",
description="The day issue was created",
type="integer",
),
MetadataInfo(
name="assignee",
description="The assignee of the ticket",
type="string",
),
MetadataInfo(
name="size",
description="How big the issue is (XS, S, M, L, XL, XXL)",
type="string",
),
],
)
from llama_index.core.retrievers import VectorIndexAutoRetriever
retriever = VectorIndexAutoRetriever(
index,
vector_store_info=vector_store_info,
similarity_top_k=2,
empty_query_top_k=10, # if only metadata filters are specified, this is the limit
verbose=True,
)
from llama_index.core import QueryBundle
nodes = retriever.retrieve(QueryBundle("Tell me about some issues on 01/11"))
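# Inspect what the auto-retriever returned (an illustrative follow-up; the
# actual node contents vary by run).
from llama_index.core.response.notebook_utils import display_source_node

for node in nodes:
    display_source_node(node, source_length=500)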
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from IPython.display import Markdown, display
import chromadb
import os
import getpass
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection("quickstart")
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, embed_model=embed_model
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
db = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db.get_or_create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, embed_model=embed_model
)
db2 = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db2.get_or_create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
index = VectorStoreIndex.from_vector_store(
vector_store,
embed_model=embed_model,
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
import chromadb
remote_db = chromadb.HttpClient()
chroma_collection = remote_db.get_or_create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
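# A minimal sketch mirroring the local examples above: wrap the remote
# collection in an index and query it the same way.
index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))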
get_ipython().run_line_magic('pip', 'install llama-index-packs-cogniswitch-agent')
from llama_index.packs.cogniswitch_agent import CogniswitchAgentPack
import os
import warnings
warnings.filterwarnings("ignore")
cs_token = "<your CogniSwitch platform token>"  # placeholder credential
oauth_token = "<your CogniSwitch OAuth token>"  # placeholder credential
cogniswitch_tool_kwargs = {"cs_token": cs_token, "apiKey": oauth_token}
cogniswitch_agent_pack = CogniswitchAgentPack(cogniswitch_tool_kwargs)
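# Illustrative usage (assumes the pack exposes the standard LlamaPack `run`
# entrypoint; the query text is arbitrary).
response = cogniswitch_agent_pack.run(
    "Tell me about the documents in my knowledge store."
)
print(response)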
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo-instruct", temperature=0.1)
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -o "./chinook.zip"')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
Settings.callback_manager = callback_manager
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.tools import QueryEngineTool
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["albums", "tracks", "artists"],
verbose=True,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query"
),
)
from llama_index.core.query_pipeline import QueryPipeline as QP
qp = QP(verbose=True)
from llama_index.core.agent.react.types import (
ActionReasoningStep,
ObservationReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.agent import Task, AgentChatResponse
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
CustomAgentComponent,
QueryComponent,
ToolRunnerComponent,
)
from llama_index.core.llms import MessageRole
from typing import Dict, Any, Optional, Tuple, List, cast
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]:
"""Agent input function.
Returns:
A Dictionary of output keys and values. If you are specifying
src_key when defining links between this component and other
components, make sure the src_key matches the specified output_key.
"""
if "current_reasoning" not in state:
state["current_reasoning"] = []
reasoning_step = ObservationReasoningStep(observation=task.input)
state["current_reasoning"].append(reasoning_step)
return {"input": task.input}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core.agent import ReActChatFormatter
from llama_index.core.query_pipeline import InputComponent, Link
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool
def react_prompt_fn(
task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool]
) -> List[ChatMessage]:
chat_formatter = ReActChatFormatter()
return chat_formatter.format(
tools,
chat_history=task.memory.get() + state["memory"].get_all(),
current_reasoning=state["current_reasoning"],
)
react_prompt_component = AgentFnComponent(
fn=react_prompt_fn, partial_dict={"tools": [sql_tool]}
)
from typing import Set, Optional
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.llms import ChatResponse
from llama_index.core.agent.types import Task
def parse_react_output_fn(
task: Task, state: Dict[str, Any], chat_response: ChatResponse
):
"""Parse ReAct output into a reasoning step."""
    output_parser = ReActOutputParser()
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-lantern')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install psycopg2-binary llama-index asyncpg')
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.lantern import LanternVectorStore
import textwrap
import openai
import os
os.environ["OPENAI_API_KEY"] = "<your_key>"
openai.api_key = "<your_key>"
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
print("Document ID:", documents[0].doc_id)
import psycopg2
connection_string = "postgresql://postgres:postgres@localhost:5432"
db_name = "postgres"
conn = psycopg2.connect(connection_string)
conn.autocommit = True
with conn.cursor() as c:
c.execute(f"DROP DATABASE IF EXISTS {db_name}")
c.execute(f"CREATE DATABASE {db_name}")
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
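# A sketch of the next step (the table name and embed_dim are assumptions, with
# 1536 matching the default dimension of text-embedding-3-small): create the
# Lantern vector store and index the documents into it.
from sqlalchemy.engine import make_url

url = make_url(connection_string)
vector_store = LanternVectorStore.from_params(
    database=db_name,
    host=url.host,
    password=url.password,
    port=url.port,
    user=url.username,
    table_name="paul_graham_essay",  # hypothetical table name
    embed_dim=1536,
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)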
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=YOUR_OPENAI_KEY')
get_ipython().system('pip install llama-index pypdf')
get_ipython().system("mkdir -p 'data/'")
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-small-en")
llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
base_retriever = base_index.as_retriever(similarity_top_k=2)
retrievals = base_retriever.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for n in retrievals:
display_source_node(n, source_length=1500)
query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm)
response = query_engine_base.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
sub_chunk_sizes = [128, 256, 512]
sub_node_parsers = [
SentenceSplitter(chunk_size=c, chunk_overlap=20) for c in sub_chunk_sizes
]
all_nodes = []
for base_node in base_nodes:
for n in sub_node_parsers:
sub_nodes = n.get_nodes_from_documents([base_node])
sub_inodes = [
IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes
]
all_nodes.extend(sub_inodes)
original_node = IndexNode.from_text_node(base_node, base_node.node_id)
all_nodes.append(original_node)
all_nodes_dict = {n.node_id: n for n in all_nodes}
vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model)
vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2)
retriever_chunk = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_chunk},
node_dict=all_nodes_dict,
verbose=True,
)
nodes = retriever_chunk.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for node in nodes:
display_source_node(node, source_length=2000)
query_engine_chunk = RetrieverQueryEngine.from_args(retriever_chunk, llm=llm)
response = query_engine_chunk.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
from llama_index.core.extractors import (
SummaryExtractor,
QuestionsAnsweredExtractor,
)
extractors = [
SummaryExtractor(summaries=["self"], show_progress=True),
QuestionsAnsweredExtractor(questions=5, show_progress=True),
]
node_to_metadata = {}
for extractor in extractors:
metadata_dicts = extractor.extract(base_nodes)
for node, metadata in zip(base_nodes, metadata_dicts):
if node.node_id not in node_to_metadata:
node_to_metadata[node.node_id] = metadata
else:
node_to_metadata[node.node_id].update(metadata)
def save_metadata_dicts(path, data):
with open(path, "w") as fp:
json.dump(data, fp)
def load_metadata_dicts(path):
with open(path, "r") as fp:
data = json.load(fp)
return data
save_metadata_dicts("data/llama2_metadata_dicts.json", node_to_metadata)
metadata_dicts = load_metadata_dicts("data/llama2_metadata_dicts.json")
import copy
all_nodes = copy.deepcopy(base_nodes)
for node_id, metadata in node_to_metadata.items():
for val in metadata.values():
all_nodes.append(IndexNode(text=val, index_id=node_id))
all_nodes_dict = {n.node_id: n for n in all_nodes}
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo")
vector_index_metadata = VectorStoreIndex(all_nodes)
vector_retriever_metadata = vector_index_metadata.as_retriever(
similarity_top_k=2
)
retriever_metadata = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_metadata},
node_dict=all_nodes_dict,
verbose=False,
)
nodes = retriever_metadata.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for node in nodes:
display_source_node(node, source_length=2000)
query_engine_metadata = RetrieverQueryEngine.from_args(
retriever_metadata, llm=llm
)
response = query_engine_metadata.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
from llama_index.core.evaluation import (
generate_question_context_pairs,
EmbeddingQAFinetuneDataset,
)
from llama_index.llms.openai import OpenAI
import nest_asyncio
nest_asyncio.apply()
eval_dataset = generate_question_context_pairs(
base_nodes, OpenAI(model="gpt-3.5-turbo")
)
eval_dataset.save_json("data/llama2_eval_dataset.json")
eval_dataset = EmbeddingQAFinetuneDataset.from_json(
"data/llama2_eval_dataset.json"
)
import pandas as pd
from llama_index.core.evaluation import (
RetrieverEvaluator,
get_retrieval_results_df,
)
top_k = 10
def display_results(names, results_arr):
"""Display results from evaluate."""
hit_rates = []
mrrs = []
for name, eval_results in zip(names, results_arr):
metric_dicts = []
for eval_result in eval_results:
metric_dict = eval_result.metric_vals_dict
metric_dicts.append(metric_dict)
results_df = pd.DataFrame(metric_dicts)
hit_rate = results_df["hit_rate"].mean()
mrr = results_df["mrr"].mean()
hit_rates.append(hit_rate)
mrrs.append(mrr)
final_df = pd.DataFrame(
{"retrievers": names, "hit_rate": hit_rates, "mrr": mrrs}
)
display(final_df)
vector_retriever_chunk = vector_index_chunk.as_retriever(
similarity_top_k=top_k
)
retriever_chunk = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_chunk},
node_dict=all_nodes_dict,
verbose=True,
)
retriever_evaluator = RetrieverEvaluator.from_metric_names(
["mrr", "hit_rate"], retriever=retriever_chunk
)
results_chunk = await retriever_evaluator.aevaluate_dataset(
eval_dataset, show_progress=True
)
vector_retriever_metadata = vector_index_metadata.as_retriever(
similarity_top_k=top_k
)
retriever_metadata = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_metadata},
node_dict=all_nodes_dict,
verbose=True,
)
retriever_evaluator = RetrieverEvaluator.from_metric_names(
["mrr", "hit_rate"], retriever=retriever_metadata
)
results_metadata = await retriever_evaluator.aevaluate_dataset(
eval_dataset, show_progress=True
)
base_retriever = base_index.as_retriever(similarity_top_k=top_k)
retriever_evaluator = RetrieverEvaluator.from_metric_names(
["mrr", "hit_rate"], retriever=base_retriever
)
results_base = await retriever_evaluator.aevaluate_dataset(
eval_dataset, show_progress=True
)
full_results_df = get_retrieval_results_df(
    [
        "Base Retriever",
        "Retriever (Chunk References)",
        "Retriever (Metadata References)",
    ],
    [results_base, results_chunk, results_metadata],
)
display(full_results_df)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core.query_pipeline import (
QueryPipeline as QP,
Link,
InputComponent,
)
from llama_index.core.query_engine.pandas import PandasInstructionParser
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
get_ipython().system("wget 'https://raw.githubusercontent.com/jerryjliu/llama_index/main/docs/examples/data/csv/titanic_train.csv' -O 'titanic_train.csv'")
import pandas as pd
df = pd.read_csv("./titanic_train.csv")
instruction_str = (
"1. Convert the query to executable Python code using Pandas.\n"
"2. The final line of code should be a Python expression that can be called with the `eval()` function.\n"
"3. The code should represent a solution to the query.\n"
"4. PRINT ONLY THE EXPRESSION.\n"
"5. Do not quote the expression.\n"
)
pandas_prompt_str = (
"You are working with a pandas dataframe in Python.\n"
"The name of the dataframe is `df`.\n"
"This is the result of `print(df.head())`:\n"
"{df_str}\n\n"
"Follow these instructions:\n"
"{instruction_str}\n"
"Query: {query_str}\n\n"
"Expression:"
)
response_synthesis_prompt_str = (
"Given an input question, synthesize a response from the query results.\n"
"Query: {query_str}\n\n"
"Pandas Instructions (optional):\n{pandas_instructions}\n\n"
"Pandas Output: {pandas_output}\n\n"
"Response: "
)
pandas_prompt = PromptTemplate(pandas_prompt_str).partial_format(
instruction_str=instruction_str, df_str=df.head(5)
)
pandas_output_parser = PandasInstructionParser(df)
response_synthesis_prompt = PromptTemplate(response_synthesis_prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
qp = QP(
modules={
"input": | InputComponent() | llama_index.core.query_pipeline.InputComponent |
get_ipython().run_line_magic('pip', 'install llama-index-llms-gradient')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().system('pip install llama-index gradientai -q')
import os
from llama_index.llms.gradient import GradientBaseModelLLM
from llama_index.finetuning import GradientFinetuneEngine
os.environ["GRADIENT_ACCESS_TOKEN"] = os.getenv("GRADIENT_API_KEY")
os.environ["GRADIENT_WORKSPACE_ID"] = "<insert_workspace_id>"
from pydantic import BaseModel
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler
from llama_index.llms.openai import OpenAI
from llama_index.llms.gradient import GradientBaseModelLLM
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
openai_handler = LlamaDebugHandler()
openai_callback = CallbackManager([openai_handler])
openai_llm = OpenAI(model="gpt-4", callback_manager=openai_callback)
gradient_handler = LlamaDebugHandler()
gradient_callback = CallbackManager([gradient_handler])
base_model_slug = "llama2-7b-chat"
gradient_llm = GradientBaseModelLLM(
base_model_slug=base_model_slug,
max_tokens=300,
callback_manager=gradient_callback,
is_chat_model=True,
)
from llama_index.core.llms import LLMMetadata
prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
"""
openai_program = LLMTextCompletionProgram.from_defaults(
    output_parser=PydanticOutputParser(Album),
    prompt_template_str=prompt_template_str,
    llm=openai_llm,
    verbose=True,
)
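# Illustrative call (the movie name is arbitrary): the program fills the
# template, queries the LLM, and parses the output into the Album model.
openai_output = openai_program(movie_name="The Shining")
print(openai_output)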
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.node_parser import SentenceWindowNodeParser
from llama_index.core.node_parser import SentenceSplitter
node_parser = SentenceWindowNodeParser.from_defaults(
window_size=3,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
text_splitter = SentenceSplitter()
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
embed_model = HuggingFaceEmbedding(
model_name="sentence-transformers/all-mpnet-base-v2", max_length=512
)
from llama_index.core import Settings
Settings.llm = llm
Settings.embed_model = embed_model
Settings.text_splitter = text_splitter
get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf')
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader(
input_files=["./IPCC_AR6_WGII_Chapter03.pdf"]
).load_data()
nodes = node_parser.get_nodes_from_documents(documents)
base_nodes = text_splitter.get_nodes_from_documents(documents)
from llama_index.core import VectorStoreIndex
sentence_index = VectorStoreIndex(nodes)
base_index = VectorStoreIndex(base_nodes)
from llama_index.core.postprocessor import MetadataReplacementPostProcessor
query_engine = sentence_index.as_query_engine(
similarity_top_k=2,
node_postprocessors=[
MetadataReplacementPostProcessor(target_metadata_key="window")
],
)
window_response = query_engine.query(
"What are the concerns surrounding the AMOC?"
)
print(window_response)
window = window_response.source_nodes[0].node.metadata["window"]
sentence = window_response.source_nodes[0].node.metadata["original_text"]
print(f"Window: {window}")
print("------------------")
print(f"Original Sentence: {sentence}")
query_engine = base_index.as_query_engine(similarity_top_k=2)
vector_response = query_engine.query(
"What are the concerns surrounding the AMOC?"
)
print(vector_response)
query_engine = base_index.as_query_engine(similarity_top_k=5)
vector_response = query_engine.query(
"What are the concerns surrounding the AMOC?"
)
print(vector_response)
for source_node in window_response.source_nodes:
print(source_node.node.metadata["original_text"])
print("--------")
for node in vector_response.source_nodes:
print("AMOC mentioned?", "AMOC" in node.node.text)
print("--------")
print(vector_response.source_nodes[2].node.text)
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
import nest_asyncio
import random
nest_asyncio.apply()
len(base_nodes)
num_nodes_eval = 30
sample_eval_nodes = random.sample(base_nodes[:200], num_nodes_eval)
dataset_generator = DatasetGenerator(
sample_eval_nodes,
llm=OpenAI(model="gpt-4"),
show_progress=True,
num_questions_per_chunk=2,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes()
eval_dataset.save_json("data/ipcc_eval_qr_dataset.json")
eval_dataset = QueryResponseDataset.from_json("data/ipcc_eval_qr_dataset.json")
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.2)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("../data/paul_graham").load_data()
from llama_index.core import Settings
Settings.chunk_size = 1024
nodes = Settings.node_parser.get_nodes_from_documents(documents)
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
from llama_index.core import SummaryIndex
from llama_index.core import VectorStoreIndex
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
list_query_engine = summary_index.as_query_engine(
response_mode="tree_summarize",
use_async=True,
)
vector_query_engine = vector_index.as_query_engine()
from llama_index.core.tools import QueryEngineTool
list_tool = QueryEngineTool.from_defaults(
query_engine=list_query_engine,
description=(
"Useful for summarization questions related to Paul Graham eassy on"
" What I Worked On."
),
)
vector_tool = QueryEngineTool.from_defaults(
query_engine=vector_query_engine,
description=(
"Useful for retrieving specific context from Paul Graham essay on What"
" I Worked On."
),
)
from llama_index.core.query_engine import RouterQueryEngine
from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector
from llama_index.core.selectors import (
PydanticMultiSelector,
PydanticSingleSelector,
)
query_engine = RouterQueryEngine(
selector=PydanticSingleSelector.from_defaults(),
query_engine_tools=[
list_tool,
vector_tool,
],
)
response = query_engine.query("What is the summary of the document?")
print(str(response))
response = query_engine.query("What did Paul Graham do after RICS?")
print(str(response))
query_engine = RouterQueryEngine(
selector=LLMSingleSelector.from_defaults(),
query_engine_tools=[
list_tool,
vector_tool,
],
)
response = query_engine.query("What is the summary of the document?")
print(str(response))
response = query_engine.query("What did Paul Graham do after RICS?")
print(str(response))
print(str(response.metadata["selector_result"]))
from llama_index.core import SimpleKeywordTableIndex
keyword_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context)
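# A hedged extension mirroring the two tools above: expose the keyword index as
# a third tool and let a multi-selector route to any combination of them.
keyword_tool = QueryEngineTool.from_defaults(
    query_engine=keyword_index.as_query_engine(),
    description=(
        "Useful for retrieving specific context using keywords from Paul"
        " Graham essay on What I Worked On."
    ),
)
query_engine = RouterQueryEngine(
    selector=PydanticMultiSelector.from_defaults(),
    query_engine_tools=[list_tool, vector_tool, keyword_tool],
)
response = query_engine.query(
    "What were notable events and people from the author's time at Interleaf"
    " and YC?"
)
print(str(response))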
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.readers.file import PDFReader
reader = PDFReader()
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'")
docs = reader.load_data("./data/10k/lyft_2021.pdf")
from llama_index.core.node_parser import SentenceSplitter
node_parser = SentenceSplitter()
nodes = node_parser.get_nodes_from_documents(docs)
print(nodes[8].get_content(metadata_mode="all"))
get_ipython().system('pip install psycopg2-binary pgvector asyncpg "sqlalchemy[asyncio]" greenlet')
from pgvector.sqlalchemy import Vector
from sqlalchemy import insert, create_engine, String, text, Integer
from sqlalchemy.orm import declarative_base, mapped_column
engine = create_engine("postgresql+psycopg2://localhost/postgres")
with engine.connect() as conn:
conn.execute(text("CREATE EXTENSION IF NOT EXISTS vector"))
conn.commit()
Base = declarative_base()
class SECTextChunk(Base):
__tablename__ = "sec_text_chunk"
id = mapped_column(Integer, primary_key=True)
page_label = mapped_column(Integer)
file_name = mapped_column(String)
text = mapped_column(String)
embedding = mapped_column(Vector(384))
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en")
for node in nodes:
text_embedding = embed_model.get_text_embedding(node.get_content())
node.embedding = text_embedding
for node in nodes:
row_dict = {
"text": node.get_content(),
"embedding": node.embedding,
**node.metadata,
}
stmt = insert(SECTextChunk).values(**row_dict)
with engine.connect() as connection:
cursor = connection.execute(stmt)
connection.commit()
from llama_index.core import PromptTemplate
text_to_sql_tmpl = """\
Given an input question, first create a syntactically correct {dialect} \
query to run, then look at the results of the query and return the answer. \
You can order the results by a relevant column to return the most \
interesting examples in the database.
Pay attention to use only the column names that you can see in the schema \
description. Be careful to not query for columns that do not exist. \
Pay attention to which column is in which table. Also, qualify column names \
with the table name when needed.
IMPORTANT NOTE: you can use specialized pgvector syntax (`<->`) to do nearest \
neighbors/semantic search to a given vector from an embeddings column in the table. \
The embeddings value for a given row typically represents the semantic meaning of that row. \
The vector represents an embedding representation \
of the question, given below. Do NOT fill in the vector values directly, but rather specify a \
`[query_vector]` placeholder. For instance, some select statement examples below \
(the name of the embeddings column is `embedding`):
SELECT * FROM items ORDER BY embedding <-> '[query_vector]' LIMIT 5;
SELECT * FROM items WHERE id != 1 ORDER BY embedding <-> (SELECT embedding FROM items WHERE id = 1) LIMIT 5;
SELECT * FROM items WHERE embedding <-> '[query_vector]' < 5;
You are required to use the following format, \
each taking one line:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
Only use tables listed below.
{schema}
Question: {query_str}
SQLQuery: \
"""
text_to_sql_prompt = PromptTemplate(text_to_sql_tmpl)
from llama_index.core import SQLDatabase
from llama_index.llms.openai import OpenAI
from llama_index.core.query_engine import PGVectorSQLQueryEngine
from llama_index.core import Settings
sql_database = SQLDatabase(engine, include_tables=["sec_text_chunk"])
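# Hedged sketch: wire the custom prompt into the PGVectorSQLQueryEngine
# imported above (the constructor kwargs are assumptions) and ask a question
# that needs both SQL structure and semantic search.
Settings.llm = OpenAI(model="gpt-4")
query_engine = PGVectorSQLQueryEngine(
    sql_database=sql_database,
    text_to_sql_prompt=text_to_sql_prompt,
)
response = query_engine.query(
    "Can you tell me about the risk factors described in the Lyft 10-K?"
)
print(str(response))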
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
TABLE_NAME = os.environ["DYNAMODB_TABLE_NAME"]
from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore
from llama_index.storage.index_store.dynamodb import DynamoDBIndexStore
from llama_index.vector_stores.dynamodb import DynamoDBVectorStore
storage_context = StorageContext.from_defaults(
docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME),
index_store=DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME),
vector_store=DynamoDBVectorStore.from_table_name(table_name=TABLE_NAME),
)
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
nodes, storage_context=storage_context
)
len(storage_context.docstore.docs)
storage_context.persist()
list_id = summary_index.index_id
vector_id = vector_index.index_id
keyword_id = keyword_table_index.index_id
from llama_index.core import load_index_from_storage
storage_context = StorageContext.from_defaults(
    docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME),
    index_store=DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME),
    vector_store=DynamoDBVectorStore.from_table_name(table_name=TABLE_NAME),
)
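# Reload each index from DynamoDB by its saved id (a sketch using the ids
# captured above).
summary_index = load_index_from_storage(storage_context, index_id=list_id)
vector_index = load_index_from_storage(storage_context, index_id=vector_id)
keyword_table_index = load_index_from_storage(
    storage_context, index_id=keyword_id
)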
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-zep')
get_ipython().system('pip install llama-index')
import logging
import sys
from uuid import uuid4
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.zep import ZepVectorStore
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.core import StorageContext
zep_api_url = "http://localhost:8000"
collection_name = f"graham{uuid4().hex}"
vector_store = ZepVectorStore(
api_url=zep_api_url,
collection_name=collection_name,
embedding_dimensions=1536,
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(str(response))
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
},
),
]
collection_name = f"movies{uuid4().hex}"
vector_store = ZepVectorStore(
api_url=zep_api_url,
collection_name=collection_name,
embedding_dimensions=1536,
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
filters = MetadataFilters(
    filters=[ExactMatchFilter(key="theme", value="Mafia")]
)
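# Query with the metadata filter applied (illustrative; mirrors the standard
# filtered-retriever pattern).
retriever = index.as_retriever(filters=filters)
result = retriever.retrieve("What is inception about?")
for node_with_score in result:
    print(node_with_score)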