get_ipython().system('pip install llama-index')
get_ipython().system('pip install wget')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-azureaisearch')
get_ipython().run_line_magic('pip', 'install azure-search-documents==11.4.0')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-azure-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-azure-openai')
import logging
import sys
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from IPython.display import Markdown, display
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.core.settings import Settings
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from llama_index.vector_stores.azureaisearch import AzureAISearchVectorStore
from llama_index.vector_stores.azureaisearch import (
IndexManagement,
MetadataIndexFieldType,
)
aoai_api_key = "YOUR_AZURE_OPENAI_API_KEY"
aoai_endpoint = "YOUR_AZURE_OPENAI_ENDPOINT"
aoai_api_version = "2023-05-15"
llm = AzureOpenAI(
model="YOUR_AZURE_OPENAI_COMPLETION_MODEL_NAME",
deployment_name="YOUR_AZURE_OPENAI_COMPLETION_DEPLOYMENT_NAME",
api_key=aoai_api_key,
azure_endpoint=aoai_endpoint,
api_version=aoai_api_version,
)
embed_model = AzureOpenAIEmbedding(
model="YOUR_AZURE_OPENAI_EMBEDDING_MODEL_NAME",
deployment_name="YOUR_AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME",
api_key=aoai_api_key,
azure_endpoint=aoai_endpoint,
api_version=aoai_api_version,
)
search_service_api_key = "YOUR-AZURE-SEARCH-SERVICE-ADMIN-KEY"
search_service_endpoint = "YOUR-AZURE-SEARCH-SERVICE-ENDPOINT"
search_service_api_version = "2023-11-01"
credential = AzureKeyCredential(search_service_api_key)
index_name = "llamaindex-vector-demo"
index_client = SearchIndexClient(
endpoint=search_service_endpoint,
credential=credential,
)
search_client = SearchClient(
endpoint=search_service_endpoint,
index_name=index_name,
credential=credential,
)
metadata_fields = {
"author": "author",
"theme": ("topic", MetadataIndexFieldType.STRING),
"director": "director",
}
vector_store = AzureAISearchVectorStore(
search_or_index_client=index_client,
filterable_metadata_field_keys=metadata_fields,
index_name=index_name,
index_management=IndexManagement.CREATE_IF_NOT_EXISTS,
id_field_key="id",
chunk_field_key="chunk",
embedding_field_key="embedding",
embedding_dimensionality=1536,
metadata_string_field_key="metadata",
doc_id_field_key="doc_id",
language_analyzer="en.lucene",
vector_algorithm_type="exhaustiveKnn",
)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
Settings.llm = llm
Settings.embed_model = embed_model
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine(similarity_top_k=3)
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
response = query_engine.query(
"What did the author learn?",
)
display(Markdown(f"<b>{response}</b>"))
index_name = "llamaindex-vector-demo"
metadata_fields = {
"author": "author",
"theme": ("topic", MetadataIndexFieldType.STRING),
"director": "director",
}
vector_store = AzureAISearchVectorStore(
search_or_index_client=search_client,
filterable_metadata_field_keys=metadata_fields,
index_management=IndexManagement.VALIDATE_INDEX,
id_field_key="id",
chunk_field_key="chunk",
embedding_field_key="embedding",
embedding_dimensionality=1536,
metadata_string_field_key="metadata",
doc_id_field_key="doc_id",
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
[],
storage_context=storage_context,
)
query_engine = index.as_query_engine()
response = query_engine.query("What was a hard moment for the author?")
display(Markdown(f"<b>{response}</b>"))
response = query_engine.query("Who is the author?")
display(Markdown(f"<b>{response}</b>"))
import time
query_engine = index.as_query_engine(streaming=True)
response = query_engine.query("What happened at interleaf?")
start_time = time.time()
token_count = 0
for token in response.response_gen:
print(token, end="")
token_count += 1
time_elapsed = time.time() - start_time
tokens_per_second = token_count / time_elapsed
print(f"\n\nStreamed output at {tokens_per_second} tokens/s")
response = query_engine.query("What colour is the sky?")
display(Markdown(f"<b>{response}</b>"))
from llama_index.core import Document
index.insert_nodes([Document(text="The sky is indigo today")])
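# Hedged follow-up (not in the original snippet): re-ask the sky question to
# confirm the freshly inserted node is now retrievable.
response = query_engine.query("What colour is the sky?")
display(Markdown(f"<b>{response}</b>"))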
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-redis')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
REDIS_HOST = os.getenv("REDIS_HOST", "127.0.0.1")
REDIS_PORT = os.getenv("REDIS_PORT", 6379)
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.storage.index_store.redis import RedisIndexStore
storage_context = StorageContext.from_defaults(
docstore=RedisDocumentStore.from_host_and_port(
host=REDIS_HOST, port=REDIS_PORT, namespace="llama_index"
),
index_store=RedisIndexStore.from_host_and_port(
host=REDIS_HOST, port=REDIS_PORT, namespace="llama_index"
),
)
storage_context.docstore.add_documents(nodes)
len(storage_context.docstore.docs)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
nodes, storage_context=storage_context
)
len(storage_context.docstore.docs)
storage_context.persist(persist_dir="./storage")
list_id = summary_index.index_id
vector_id = vector_index.index_id
keyword_id = keyword_table_index.index_id
from llama_index.core import load_index_from_storage
storage_context = StorageContext.from_defaults(
docstore=RedisDocumentStore.from_host_and_port(
host=REDIS_HOST, port=REDIS_PORT, namespace="llama_index"
),
index_store=RedisIndexStore.from_host_and_port(
host=REDIS_HOST, port=REDIS_PORT, namespace="llama_index"
),
)
summary_index = load_index_from_storage(
storage_context=storage_context, index_id=list_id
)
vector_index = load_index_from_storage(
storage_context=storage_context, index_id=vector_id
)
keyword_table_index = load_index_from_storage(
storage_context=storage_context, index_id=keyword_id
)
chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.llm = chatgpt
Settings.chunk_size = 1024
query_engine = summary_index.as_query_engine()
list_response = query_engine.query("What is a summary of this document?")
display_response(list_response)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O pg_essay.txt")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader(input_files=["pg_essay.txt"])
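# Hedged continuation: load the downloaded essay into Document objects.
documents = reader.load_data()
print(f"Loaded {len(documents)} document(s)")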
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-tools-metaphor')
get_ipython().system("mkdir -p 'other_images/openai'")
get_ipython().system('wget "https://images.openai.com/blob/a2e49de2-ba5b-4869-9c2d-db3b4b5dcc19/new-models-and-developer-products-announced-at-devday.jpg?width=2000" -O other_images/openai/dev_day.png')
get_ipython().system('wget "https://drive.google.com/uc\\?id\\=1B4f5ZSIKN0zTTPPRlZ915Ceb3_uF9Zlq\\&export\\=download" -O other_images/adidas.png')
from llama_index.readers.web import SimpleWebPageReader
url = "https://openai.com/blog/new-models-and-developer-products-announced-at-devday"
reader = SimpleWebPageReader(html_to_text=True)
documents = reader.load_data(urls=[url])
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core import Settings
Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
vector_index = VectorStoreIndex.from_documents(
documents,
)
query_tool = QueryEngineTool(
query_engine=vector_index.as_query_engine(),
metadata=ToolMetadata(
        name="vector_tool",
description=(
"Useful to lookup new features announced by OpenAI"
),
),
)
from llama_index.core.agent.react_multimodal.step import (
MultimodalReActAgentWorker,
)
from llama_index.core.agent import AgentRunner
from llama_index.core.multi_modal_llms import MultiModalLLM
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core.agent import Task
mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview", max_new_tokens=1000)
react_step_engine = MultimodalReActAgentWorker.from_tools(
[query_tool],
multi_modal_llm=mm_llm,
verbose=True,
)
agent = AgentRunner(react_step_engine)
query_str = (
"The photo shows some new features released by OpenAI. "
"Can you pinpoint the features in the photo and give more details using relevant tools?"
)
from llama_index.core.schema import ImageDocument
image_document = ImageDocument(image_path="other_images/openai/dev_day.png")
task = agent.create_task(
query_str,
extra_state={"image_docs": [image_document]},
)
def execute_step(agent: AgentRunner, task: Task):
step_output = agent.run_step(task.task_id)
if step_output.is_last:
response = agent.finalize_response(task.task_id)
print(f"> Agent finished: {str(response)}")
return response
else:
return None
def execute_steps(agent: AgentRunner, task: Task):
response = execute_step(agent, task)
while response is None:
response = execute_step(agent, task)
return response
response = execute_step(agent, task)
response = execute_step(agent, task)
print(str(response))
from llama_index.tools.metaphor import MetaphorToolSpec
from llama_index.core.agent.react_multimodal.step import (
MultimodalReActAgentWorker,
)
from llama_index.core.agent import AgentRunner
from llama_index.core.multi_modal_llms import MultiModalLLM
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core.agent import Task
metaphor_tool_spec = MetaphorToolSpec(
api_key="<api_key>",
)
metaphor_tools = metaphor_tool_spec.to_tool_list()
mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview", max_new_tokens=1000)
react_step_engine = MultimodalReActAgentWorker.from_tools(
metaphor_tools,
multi_modal_llm=mm_llm,
verbose=True,
)
agent = AgentRunner(react_step_engine)
from llama_index.core.schema import ImageDocument
query_str = "Look up some reviews regarding these shoes."
image_document = ImageDocument(image_path="other_images/adidas.png")
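# Hedged continuation, mirroring the dev_day task above: attach the shoe image
# to a new task and step the agent to completion with the helper defined earlier.
task = agent.create_task(
    query_str,
    extra_state={"image_docs": [image_document]},
)
response = execute_steps(agent, task)
print(str(response))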
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader(
input_files=["./data/paul_graham/paul_graham_essay.txt"]
)
docs = reader.load_data()
text = docs[0].text
from llama_index.core import PromptTemplate
qa_prompt_tmpl = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query.\n"
"Please also write the answer in the style of {tone_name}.\n"
"Query: {query_str}\n"
"Answer: "
)
qa_prompt = PromptTemplate(qa_prompt_tmpl)
refine_prompt_tmpl = (
"The original query is as follows: {query_str}\n"
"We have provided an existing answer: {existing_answer}\n"
"We have the opportunity to refine the existing answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"Given the new context, refine the original answer to better "
"answer the query. "
"Please also write the answer in the style of {tone_name}.\n"
"If the context isn't useful, return the original answer.\n"
"Refined Answer: "
)
refine_prompt = PromptTemplate(refine_prompt_tmpl)
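# Hedged usage sketch: bind the extra tone_name variable with partial_format and
# hand both prompts to a query engine; building an index here is an assumption,
# not part of the original snippet.
from llama_index.core import VectorStoreIndex

index = VectorStoreIndex.from_documents(docs)
query_engine = index.as_query_engine(
    text_qa_template=qa_prompt.partial_format(tone_name="a Shakespeare play"),
    refine_template=refine_prompt.partial_format(tone_name="a Shakespeare play"),
)
print(query_engine.query("What did the author do growing up?"))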
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
llm = OpenAI(model="gpt-4")
splitter = SentenceSplitter(chunk_size=1024)
nodes = splitter.get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(
nodes=nodes,
storage_context=storage_context,
)
retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
from llama_index.core.response.notebook_utils import display_source_node
nodes = retriever.retrieve("What happened at Viaweb and Interleaf?")
for node in nodes:
display_source_node(node)
nodes = retriever.retrieve("What did Paul Graham do after RISD?")
for node in nodes:
    display_source_node(node)
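# Hedged continuation: wrap the BM25 retriever in a query engine for end-to-end
# answers (from_args with an explicit llm is a standard pattern).
from llama_index.core.query_engine import RetrieverQueryEngine

bm25_query_engine = RetrieverQueryEngine.from_args(retriever, llm=llm)
print(bm25_query_engine.query("What happened at Viaweb and Interleaf?"))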
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
from llama_index.readers.file import ImageTabularChartReader
from llama_index.core import SummaryIndex
from llama_index.core.response.notebook_utils import display_response
from pathlib import Path
loader = ImageTabularChartReader(keep_image=True)
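# Hedged usage sketch; "chart.png" is a hypothetical local chart image, and the
# file-reader load_data(file=...) signature is assumed.
documents = loader.load_data(file=Path("./chart.png"))
print(documents[0].text)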
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('pip install llama-index')
get_ipython().system('pip install spacy')
wiki_titles = [
"Toronto",
"Seattle",
"Chicago",
"Boston",
"Houston",
"Tokyo",
"Berlin",
"Lisbon",
]
from pathlib import Path
import requests
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
data_path = Path("data")
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
from llama_index.core import SimpleDirectoryReader
city_docs = {}
for wiki_title in wiki_titles:
city_docs[wiki_title] = SimpleDirectoryReader(
input_files=[f"data/{wiki_title}.txt"]
).load_data()
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
city_descs_dict = {}
choices = []
choice_to_id_dict = {}
for idx, wiki_title in enumerate(wiki_titles):
vector_desc = (
"Useful for questions related to specific aspects of"
f" {wiki_title} (e.g. the history, arts and culture,"
" sports, demographics, or more)."
)
summary_desc = (
"Useful for any requests that require a holistic summary"
f" of EVERYTHING about {wiki_title}. For questions about"
" more specific sections, please use the vector_tool."
)
doc_id_vector = f"{wiki_title}_vector"
doc_id_summary = f"{wiki_title}_summary"
city_descs_dict[doc_id_vector] = vector_desc
city_descs_dict[doc_id_summary] = summary_desc
choices.extend([vector_desc, summary_desc])
    choice_to_id_dict[idx * 2] = doc_id_vector
    choice_to_id_dict[idx * 2 + 1] = doc_id_summary
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
llm = OpenAI(model="gpt-3.5-turbo")
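# Hedged sketch: number the gathered tool descriptions, the usual textual input
# for a single-select router prompt over these choices.
choices_str = "\n\n".join(
    [f"{idx + 1}. {choice}" for idx, choice in enumerate(choices)]
)
print(choices_str[:512])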
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
)
from IPython.display import Markdown, display
import dotenv
import s3fs
import os
dotenv.load_dotenv("../../../.env")
AWS_KEY = os.environ["AWS_ACCESS_KEY_ID"]
AWS_SECRET = os.environ["AWS_SECRET_ACCESS_KEY"]
R2_ACCOUNT_ID = os.environ["R2_ACCOUNT_ID"]
assert AWS_KEY is not None and AWS_KEY != ""
s3 = s3fs.S3FileSystem(
key=AWS_KEY,
secret=AWS_SECRET,
endpoint_url=f"https://{R2_ACCOUNT_ID}.r2.cloudflarestorage.com",
s3_additional_kwargs={"ACL": "public-read"},
)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(len(documents))
index = VectorStoreIndex.from_documents(documents, fs=s3)
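# Hedged continuation: persist the index through the s3fs filesystem;
# "llamaindex-bucket" is a hypothetical R2 bucket name.
index.storage_context.persist(persist_dir="llamaindex-bucket", fs=s3)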
get_ipython().run_line_magic('pip', 'install llama-index-llms-palm')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install -q google-generativeai')
import pprint
import google.generativeai as palm
palm_api_key = ""
palm.configure(api_key=palm_api_key)
models = [
m
for m in palm.list_models()
if "generateText" in m.supported_generation_methods
]
model = models[0].name
print(model)
from llama_index.llms.palm import PaLM
model = PaLM(api_key=palm_api_key)
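# Hedged continuation: generate a completion; the prompt text is a placeholder.
result = model.complete("The meaning of life is")
print(result)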
import os
os.environ["OPENAI_API_KEY"] = ""
os.environ["VIDEO_DB_API_KEY"] = ""
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install videodb')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-videodb')
from videodb import connect
conn = connect()
print("uploading first video")
video1 = conn.upload(url="https://www.youtube.com/watch?v=lsODSDmY4CY")
print("uploading second video")
video2 = conn.upload(url="https://www.youtube.com/watch?v=vZ4kOr38JhY")
print("Indexing the videos...")
video1.index_spoken_words()
video2.index_spoken_words()
from llama_index.retrievers.videodb import VideoDBRetriever
from llama_index.core import get_response_synthesizer
from llama_index.core.query_engine import RetrieverQueryEngine
retriever = VideoDBRetriever()
response_synthesizer = get_response_synthesizer()
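# Hedged continuation: assemble the retriever and synthesizer into a query
# engine; the question is a placeholder for something said in the uploaded videos.
query_engine = RetrieverQueryEngine(
    retriever=retriever,
    response_synthesizer=response_synthesizer,
)
print(query_engine.query("What does the speaker say about morning routines?"))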
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index import SimpleDirectoryReader, VectorStoreIndex
import requests
response = requests.get(
"https://www.dropbox.com/s/f6bmb19xdg0xedm/paul_graham_essay.txt?dl=1"
)
essay_txt = response.text
with open("pg_essay.txt", "w") as fp:
fp.write(essay_txt)
documents = SimpleDirectoryReader(input_files=["pg_essay.txt"]).load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
query_engine_tool = QueryEngineTool(
query_engine=query_engine,
metadata=ToolMetadata(
name="paul_graham",
description=(
"Provides a biography of Paul Graham, from childhood to college to adult"
" life"
),
),
)
from llama_index.tools.text_to_image.base import TextToImageToolSpec
from llama_index.llms import OpenAI
llm = OpenAI(model="gpt-4")
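# Hedged continuation: expose text-to-image alongside the essay tool through an
# OpenAIAgent; the default TextToImageToolSpec constructor is assumed.
text_to_image_spec = TextToImageToolSpec()
agent = OpenAIAgent.from_tools(
    [*text_to_image_spec.to_tool_list(), query_engine_tool], llm=llm, verbose=True
)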
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.core import SummaryIndex
from llama_index.core.response.notebook_utils import display_response
from llama_index.llms.openai import OpenAI
from llama_index.core.node_parser import SentenceSplitter
get_ipython().system("mkdir -p 'data'")
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.core import Document
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
llm = OpenAI(model="gpt-4")
chunk_sizes = [128, 256, 512, 1024]
nodes_list = []
vector_indices = []
for chunk_size in chunk_sizes:
print(f"Chunk Size: {chunk_size}")
splitter = SentenceSplitter(chunk_size=chunk_size)
nodes = splitter.get_nodes_from_documents(docs)
for node in nodes:
node.metadata["chunk_size"] = chunk_size
node.excluded_embed_metadata_keys = ["chunk_size"]
node.excluded_llm_metadata_keys = ["chunk_size"]
nodes_list.append(nodes)
vector_index = VectorStoreIndex(nodes)
vector_indices.append(vector_index)
from llama_index.core.tools import RetrieverTool
from llama_index.core.schema import IndexNode
retriever_dict = {}
retriever_nodes = []
for chunk_size, vector_index in zip(chunk_sizes, vector_indices):
node_id = f"chunk_{chunk_size}"
node = IndexNode(
text=(
"Retrieves relevant context from the Llama 2 paper (chunk size"
f" {chunk_size})"
),
index_id=node_id,
)
retriever_nodes.append(node)
retriever_dict[node_id] = vector_index.as_retriever()
from llama_index.core.selectors import PydanticMultiSelector
from llama_index.core.retrievers import RouterRetriever
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(retriever_nodes)
retriever = RecursiveRetriever(
root_id="root",
retriever_dict={"root": summary_index.as_retriever(), **retriever_dict},
)
nodes = await retriever.aretrieve(
"Tell me about the main aspects of safety fine-tuning"
)
print(f"Number of nodes: {len(nodes)}")
for node in nodes:
print(node.node.metadata["chunk_size"])
print(node.node.get_text())
from llama_index.core.postprocessor import LLMRerank, SentenceTransformerRerank
from llama_index.postprocessor.cohere_rerank import CohereRerank
reranker = CohereRerank(top_n=10)
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine(retriever, node_postprocessors=[reranker])
response = query_engine.query(
"Tell me about the main aspects of safety fine-tuning"
)
display_response(
response, show_source=True, source_length=500, show_source_metadata=True
)
from collections import defaultdict
import pandas as pd
def mrr_all(metadata_values, metadata_key, source_nodes):
value_to_mrr_dict = {}
for metadata_value in metadata_values:
mrr = 0
for idx, source_node in enumerate(source_nodes):
if source_node.node.metadata[metadata_key] == metadata_value:
mrr = 1 / (idx + 1)
break
else:
continue
value_to_mrr_dict[metadata_value] = mrr
df = pd.DataFrame(value_to_mrr_dict, index=["MRR"])
df.style.set_caption("Mean Reciprocal Rank")
return df
print("Mean Reciprocal Rank for each Chunk Size")
mrr_all(chunk_sizes, "chunk_size", response.source_nodes)
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
import nest_asyncio
nest_asyncio.apply()
eval_llm = OpenAI(model="gpt-4")
dataset_generator = DatasetGenerator(
nodes_list[-1],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=2,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60)
eval_dataset.save_json("data/llama2_eval_qr_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
import asyncio
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
RelevancyEvaluator,
FaithfulnessEvaluator,
PairwiseComparisonEvaluator,
)
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
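# Hedged sketch: the remaining imported evaluators follow the same constructor pattern.
evaluator_s = SemanticSimilarityEvaluator()
evaluator_r = RelevancyEvaluator(llm=eval_llm)
evaluator_f = FaithfulnessEvaluator(llm=eval_llm)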
get_ipython().run_line_magic('pip', 'install llama-index-llms-ai21')
get_ipython().system('pip install llama-index')
from llama_index.llms.ai21 import AI21
api_key = "Your api key"
resp = AI21(api_key=api_key).complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.ai21 import AI21
messages = [
ChatMessage(role="user", content="hello there"),
ChatMessage(
role="assistant", content="Arrrr, matey! How can I help ye today?"
),
ChatMessage(role="user", content="What is your name"),
]
resp = AI21(api_key=api_key).chat(messages)
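# Hedged continuation: print the chat response.
print(resp)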
get_ipython().run_line_magic('pip', 'install -U llama-index llama-index-embeddings-nomic')
nomic_api_key = "<NOMIC API KEY>"
import nest_asyncio
nest_asyncio.apply()
from llama_index.embeddings.nomic import NomicEmbedding
embed_model = NomicEmbedding(
api_key=nomic_api_key,
dimensionality=128,
model_name="nomic-embed-text-v1.5",
)
embedding = embed_model.get_text_embedding("Nomic Embeddings")
print(len(embedding))
embedding[:5]
embed_model = NomicEmbedding(
api_key=nomic_api_key,
dimensionality=256,
model_name="nomic-embed-text-v1.5",
)
embedding = embed_model.get_text_embedding("Nomic Embeddings")
print(len(embedding))
embedding[:5]
embed_model = NomicEmbedding(
api_key=nomic_api_key,
dimensionality=768,
model_name="nomic-embed-text-v1.5",
)
embedding = embed_model.get_text_embedding("Nomic Embeddings")
print(len(embedding))
embedding[:5]
embed_model = NomicEmbedding(
api_key=nomic_api_key, model_name="nomic-embed-text-v1"
)
embedding = embed_model.get_text_embedding("Nomic Embeddings")
print(len(embedding))
embedding[:5]
from llama_index.core import Settings
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
import os
os.environ["OPENAI_API_KEY"] = "<YOUR OPENAI API KEY>"
embed_model = NomicEmbedding(
api_key=nomic_api_key,
dimensionality=128,
model_name="nomic-embed-text-v1.5",
)
llm = OpenAI(model="gpt-3.5-turbo")
get_ipython().system('pip install llama-index')
get_ipython().system('pip install promptlayer')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
os.environ["PROMPTLAYER_API_KEY"] = "pl_..."
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
docs = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.core import set_global_handler
set_global_handler("promptlayer", pl_tags=["paul graham", "essay"])
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(docs)
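# Hedged continuation: any query now logs to PromptLayer through the global handler.
query_engine = index.as_query_engine()
print(query_engine.query("What did the author do growing up?"))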
import warnings
warnings.filterwarnings("ignore")
import os
from llama_index.tools.cogniswitch import CogniswitchToolSpec
from llama_index.core.agent import ReActAgent
cs_token = "YOUR_COGNISWITCH_PLATFORM_TOKEN"  # placeholder credential
oauth_token = "YOUR_COGNISWITCH_OAUTH_TOKEN"  # placeholder credential
toolspec = CogniswitchToolSpec(cs_token=cs_token, apiKey=oauth_token)
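# Hedged continuation: hand the Cogniswitch tools to a ReActAgent.
agent = ReActAgent.from_tools(toolspec.to_tool_list(), verbose=True)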
get_ipython().run_line_magic('pip', 'install llama-index-readers-deeplake')
get_ipython().system('pip install llama-index')
import getpass
import os
import random
import textwrap
from llama_index.core import VectorStoreIndex
from llama_index.readers.deeplake import DeepLakeReader
os.environ["OPENAI_API_KEY"] = getpass.getpass("open ai api key: ")
reader = DeepLakeReader()
query_vector = [random.random() for _ in range(1536)]
documents = reader.load_data(
query_vector=query_vector,
dataset_path="hub://activeloop/paul_graham_essay",
limit=5,
)
index = VectorStoreIndex.from_documents(documents)
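# Hedged continuation: query the index built from the DeepLake sample.
query_engine = index.as_query_engine()
print(query_engine.query("What was a hard moment for the author?"))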
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.2)
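# Hedged continuation: pair the LLM with an embedding model in Settings; the
# specific embedding model is an assumption.
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")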
get_ipython().system('pip install llama-index')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
get_ipython().run_line_magic('pip', 'install -U llama-index llama-index-embeddings-nomic')
nomic_api_key = "<NOMIC API KEY>"
import nest_asyncio
nest_asyncio.apply()
from llama_index.embeddings.nomic import NomicEmbedding
embed_model = NomicEmbedding(
api_key=nomic_api_key,
dimensionality=128,
model_name="nomic-embed-text-v1.5",
)
embedding = embed_model.get_text_embedding("Nomic Embeddings")
print(len(embedding))
embedding[:5]
embed_model = NomicEmbedding(
api_key=nomic_api_key,
dimensionality=256,
model_name="nomic-embed-text-v1.5",
)
embedding = embed_model.get_text_embedding("Nomic Embeddings")
print(len(embedding))
embedding[:5]
embed_model = NomicEmbedding(
api_key=nomic_api_key,
dimensionality=768,
model_name="nomic-embed-text-v1.5",
)
embedding = embed_model.get_text_embedding("Nomic Embeddings")
print(len(embedding))
embedding[:5]
embed_model = NomicEmbedding(
api_key=nomic_api_key, model_name="nomic-embed-text-v1"
)
embedding = embed_model.get_text_embedding("Nomic Embeddings")
print(len(embedding))
embedding[:5]
from llama_index.core import Settings
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
import os
os.environ["OPENAI_API_KEY"] = "<YOUR OPENAI API KEY>"
embed_model = NomicEmbedding(
api_key=nomic_api_key,
dimensionality=128,
model_name="nomic-embed-text-v1.5",
)
llm = OpenAI(model="gpt-3.5-turbo")
Settings.llm = llm
Settings.embed_model = embed_model
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
index = VectorStoreIndex.from_documents(documents)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-.."
openai.api_key = os.environ["OPENAI_API_KEY"]
from IPython.display import Markdown, display
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
)
engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
from llama_index.core import SQLDatabase
from llama_index.llms.openai import OpenAI
llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo")
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
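# Hedged continuation: query the table in natural language with the standard
# NLSQLTableQueryEngine.
from llama_index.core.query_engine import NLSQLTableQueryEngine

query_engine = NLSQLTableQueryEngine(
    sql_database=sql_database, tables=["city_stats"], llm=llm
)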
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-output-parsers-guardrails')
get_ipython().system('pip install guardrails-ai')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from IPython.display import Markdown, display
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents, chunk_size=512)
from llama_index.output_parsers.guardrails import GuardrailsOutputParser
from pydantic import BaseModel, Field
from typing import List
import guardrails as gd
class Point(BaseModel):
explanation: str = Field()
explanation2: str = Field()
explanation3: str = Field()
class BulletPoints(BaseModel):
points: List[Point] = Field(
description="Bullet points regarding events in the author's life."
)
prompt = """
Query string here.
${gr.xml_prefix_prompt}
${output_schema}
${gr.json_suffix_prompt_v2_wo_none}
"""
from llama_index.llms.openai import OpenAI
guard = gd.Guard.from_pydantic(output_class=BulletPoints, prompt=prompt)
output_parser = GuardrailsOutputParser(guard, llm=OpenAI())
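# Hedged continuation (assumes the LLM accepts an output_parser field): attach
# the parser so query responses are validated against the BulletPoints schema.
llm = OpenAI(output_parser=output_parser)
query_engine = index.as_query_engine(llm=llm)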
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
get_ipython().system('pip install "llama_index>=0.9.7"')
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.extractors import TitleExtractor, SummaryExtractor
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import MetadataMode
def build_pipeline():
    llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1)
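    # Hedged sketch of the rest of the pipeline body: chunk, extract metadata,
    # embed. The chunking parameters are assumptions.
    transformations = [
        SentenceSplitter(chunk_size=1024, chunk_overlap=20),
        TitleExtractor(llm=llm, metadata_mode=MetadataMode.EMBED),
        SummaryExtractor(llm=llm, metadata_mode=MetadataMode.EMBED),
        OpenAIEmbedding(),
    ]
    return IngestionPipeline(transformations=transformations)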
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo-instruct", temperature=0.1)
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents)
from llama_index.core.postprocessor import LongContextReorder
reorder = LongContextReorder()
reorder_engine = index.as_query_engine(
node_postprocessors=[reorder], similarity_top_k=5
)
base_engine = index.as_query_engine(similarity_top_k=5)
from llama_index.core.response.notebook_utils import display_response
base_response = base_engine.query("Did the author meet Sam Altman?")
display_response(base_response)
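# Hedged continuation: the same query through the reordering engine for comparison.
reorder_response = reorder_engine.query("Did the author meet Sam Altman?")
display_response(reorder_response)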
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
)
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(response_mode="tree_summarize")
def display_prompt_dict(prompts_dict):
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>"
display(Markdown(text_md))
print(p.get_template())
display(Markdown("<br><br>"))
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
prompts_dict = query_engine.response_synthesizer.get_prompts()
display_prompt_dict(prompts_dict)
query_engine = index.as_query_engine(response_mode="compact")
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
response = query_engine.query("What did the author do growing up?")
print(str(response))
from llama_index.core import PromptTemplate
query_engine = index.as_query_engine(response_mode="tree_summarize")
new_summary_tmpl_str = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query in the style of a Shakespeare play.\n"
"Query: {query_str}\n"
"Answer: "
)
new_summary_tmpl = PromptTemplate(new_summary_tmpl_str)
query_engine.update_prompts(
{"response_synthesizer:summary_template": new_summary_tmpl}
)
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
response = query_engine.query("What did the author do growing up?")
print(str(response))
from llama_index.core.query_engine import (
RouterQueryEngine,
FLAREInstructQueryEngine,
)
from llama_index.core.selectors import LLMMultiSelector
from llama_index.core.evaluation import FaithfulnessEvaluator, DatasetGenerator
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.tools import QueryEngineTool
query_tool = QueryEngineTool.from_defaults(
query_engine=query_engine, description="test description"
)
router_query_engine = RouterQueryEngine.from_defaults([query_tool])
prompts_dict = router_query_engine.get_prompts()
display_prompt_dict(prompts_dict)
flare_query_engine = FLAREInstructQueryEngine(query_engine)
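# Hedged continuation: FLARE engines expose the same prompt-mixin interface.
prompts_dict = flare_query_engine.get_prompts()
display_prompt_dict(prompts_dict)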
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.postprocessor import LLMRerank
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core import Settings
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.packs.koda_retriever import KodaRetriever
import os
from pinecone import Pinecone
pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY"))
index = pc.Index("sample-movies")
Settings.llm = OpenAI()
Settings.embed_model = OpenAIEmbedding()
vector_store = PineconeVectorStore(pinecone_index=index, text_key="summary")
vector_index = VectorStoreIndex.from_vector_store(
vector_store=vector_store, embed_model=Settings.embed_model
)
reranker = LLMRerank(llm=Settings.llm) # optional
categories = { # key, #alpha, description [and examples]
"concept seeking query": {
"alpha": 0.2,
"description": "Abstract questions, usually on a specific topic, that require multiple sentences to answer",
"examples": [
"What is the dual-encoder architecture used in recent works on dense retrievers?",
"Why should I use semantic search to rank results?",
],
},
"fact seeking query": {
"alpha": 0.6,
"description": "Queries with a single, clear answer",
"examples": [
"What is the total number of propositions the English Wikipedia dump is segmented into in FACTOID WIKI?",
"How many documents are semantically ranked?",
],
},
"queries with misspellings": {
"alpha": 1,
"description": "Queries with typos, transpositions and common misspellings introduced",
"examples": [
"What is the advntage of prposition retrieval over sentnce or passage retrieval?",
"Ho w mny documents are samantically r4nked",
],
},
}
retriever = KodaRetriever(
index=vector_index,
llm=Settings.llm,
matrix=categories, # koda now knows to use these categories
reranker=reranker, # optional
verbose=True,
)
query = "Can you explain the Jurassic Park as a business as it was supposed to operate inside the movie's lore or timeline?"
results = retriever.retrieve(query)
results
query_engine = RetrieverQueryEngine.from_args(retriever=retriever)
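# Hedged continuation: an end-to-end answer through the assembled engine.
response = query_engine.query(query)
print(str(response))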
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.2)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(vector_store_query_mode="mmr")
response = query_engine.query("What did the author do growing up?")
print(response)
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine_with_threshold = index.as_query_engine(
vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.2}
)
response = query_engine_with_threshold.query(
"What did the author do growing up?"
)
print(response)
index1 = VectorStoreIndex.from_documents(documents)
query_engine_no_mmr = index1.as_query_engine()
response_no_mmr = query_engine_no_mmr.query(
"What did the author do growing up?"
)
index2 = VectorStoreIndex.from_documents(documents)
query_engine_with_high_threshold = index2.as_query_engine(
vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.8}
)
response_high_threshold = query_engine_with_high_threshold.query(
"What did the author do growing up?"
)
index3 = VectorStoreIndex.from_documents(documents)
query_engine_with_low_threshold = index3.as_query_engine(
vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.2}
)
response_low_threshold = query_engine_with_low_threshold.query(
"What did the author do growing up?"
)
print(
"Scores without MMR ",
[node.score for node in response_no_mmr.source_nodes],
)
print(
"Scores with MMR and a threshold of 0.8 ",
[node.score for node in response_high_threshold.source_nodes],
)
print(
"Scores with MMR and a threshold of 0.2 ",
[node.score for node in response_low_threshold.source_nodes],
)
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(
documents,
)
retriever = index.as_retriever(
vector_store_query_mode="mmr",
similarity_top_k=3,
vector_store_kwargs={"mmr_threshold": 0.1},
)
nodes = retriever.retrieve(
"What did the author do during his time in Y Combinator?"
)
from llama_index.core.response.notebook_utils import display_source_node
for n in nodes:
display_source_node(n, source_length=1000)
retriever = index.as_retriever(
vector_store_query_mode="mmr",
similarity_top_k=3,
vector_store_kwargs={"mmr_threshold": 0.5},
)
nodes = retriever.retrieve(
"What did the author do during his time in Y Combinator?"
)
for n in nodes:
display_source_node(n, source_length=1000)
retriever = index.as_retriever(
vector_store_query_mode="mmr",
similarity_top_k=3,
vector_store_kwargs={"mmr_threshold": 0.8},
)
nodes = retriever.retrieve(
"What did the author do during his time in Y Combinator?"
)
for n in nodes:
display_source_node(n, source_length=1000)
retriever = index.as_retriever(
vector_store_query_mode="mmr",
similarity_top_k=3,
vector_store_kwargs={"mmr_threshold": 1.0},
)
nodes = retriever.retrieve(
"What did the author do during his time in Y Combinator?"
)
for n in nodes:
    display_source_node(n, source_length=1000)
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().system('pip install -q llama-index google-generativeai')
get_ipython().run_line_magic('env', 'GOOGLE_API_KEY=...')
import os
GOOGLE_API_KEY = "" # add your GOOGLE API key here
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from llama_index.llms.gemini import Gemini
resp = Gemini().complete("Write a poem about a magic backpack")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.gemini import Gemini
messages = [
ChatMessage(role="user", content="Hello friend!"),
ChatMessage(role="assistant", content="Yarr what is shakin' matey?"),
ChatMessage(
role="user", content="Help me decide what to have for dinner."
),
]
resp = Gemini().chat(messages)
print(resp)
from llama_index.llms.gemini import Gemini
llm = Gemini()
resp = llm.stream_complete(
"The story of Sourcrust, the bread creature, is really interesting. It all started when..."
)
for r in resp:
print(r.text, end="")
from llama_index.llms.gemini import Gemini
from llama_index.core.llms import ChatMessage
llm = Gemini()
messages = [
ChatMessage(role="user", content="Hello friend!"),
ChatMessage(role="assistant", content="Yarr what is shakin' matey?"),
ChatMessage(
role="user", content="Help me decide what to have for dinner."
),
]
resp = llm.stream_chat(messages)
for r in resp:
print(r.delta, end="")
import google.generativeai as genai
for m in genai.list_models():
if "generateContent" in m.supported_generation_methods:
print(m.name)
from llama_index.llms.gemini import Gemini
llm = Gemini(model="models/gemini-pro")
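# Hedged continuation: exercise the explicitly selected model.
resp = llm.complete("Write a short poem about a magic backpack")
print(resp)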
get_ipython().run_line_magic('pip', 'install llama-index-llms-monsterapi')
get_ipython().system('python3 -m pip install llama-index --quiet -y')
get_ipython().system('python3 -m pip install monsterapi --quiet')
get_ipython().system('python3 -m pip install sentence_transformers --quiet')
import os
from llama_index.llms.monsterapi import MonsterLLM
from llama_index.core.embeddings import resolve_embed_model
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
os.environ["MONSTER_API_KEY"] = ""
model = "llama2-7b-chat"
llm = MonsterLLM(model=model, temperature=0.75)
result = llm.complete("Who are you?")
print(result)
from llama_index.core.llms import ChatMessage
history_message = ChatMessage(
**{
"role": "user",
"content": (
"When asked 'who are you?' respond as 'I am qblocks llm model'"
" everytime."
),
}
)
current_message = ChatMessage(**{"role": "user", "content": "Who are you?"})
response = llm.chat([history_message, current_message])
print(response)
get_ipython().system('python3 -m pip install pypdf --quiet')
get_ipython().system('rm -rf ./data')
get_ipython().system('mkdir -p data && cd data && curl \'https://arxiv.org/pdf/2005.11401.pdf\' -o "RAG.pdf"')
documents = SimpleDirectoryReader("./data").load_data()
llm = MonsterLLM(model=model, temperature=0.75, context_window=1024)
embed_model = resolve_embed_model("local:BAAI/bge-small-en-v1.5")
splitter = SentenceSplitter(chunk_size=1024)
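# Hedged continuation: build the index with the local embedding model and query
# it; the question is a placeholder about the downloaded RAG paper.
index = VectorStoreIndex.from_documents(
    documents, transformations=[splitter], embed_model=embed_model
)
query_engine = index.as_query_engine(llm=llm)
print(query_engine.query("What is Retrieval-Augmented Generation?"))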
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-0613")
get_ipython().system('pip install llama-index llama-index-packs-raptor llama-index-vector-stores-qdrant')
from llama_index.packs.raptor import RaptorPack
get_ipython().system('wget https://arxiv.org/pdf/2401.18059.pdf -O ./raptor_paper.pdf')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
import nest_asyncio
nest_asyncio.apply()
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader(input_files=["./raptor_paper.pdf"]).load_data()
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore
import chromadb
client = chromadb.PersistentClient(path="./raptor_paper_db")
collection = client.get_or_create_collection("raptor")
vector_store = ChromaVectorStore(chroma_collection=collection)
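# Hedged continuation: assemble the RaptorPack over the parsed paper; the
# chunking and model choices here are assumptions.
raptor_pack = RaptorPack(
    documents,
    embed_model=OpenAIEmbedding(model="text-embedding-3-small"),
    llm=OpenAI(model="gpt-3.5-turbo", temperature=0.1),
    vector_store=vector_store,
    transformations=[SentenceSplitter(chunk_size=400, chunk_overlap=50)],
)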
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-kvstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
from llama_index.storage.kvstore.firestore import FirestoreKVStore
from llama_index.storage.docstore.firestore import FirestoreDocumentStore
from llama_index.storage.index_store.firestore import FirestoreIndexStore
kvstore = FirestoreKVStore()
storage_context = StorageContext.from_defaults(
    docstore=FirestoreDocumentStore(kvstore),
    index_store=FirestoreIndexStore(kvstore),
)
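# Hedged continuation: register the parsed nodes, mirroring the Redis example above.
storage_context.docstore.add_documents(nodes)
len(storage_context.docstore.docs)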
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
from llama_index.core.node_parser import SentenceSplitter
node_parser = SentenceSplitter(chunk_size=256)
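# Hedged continuation: split the loaded pages into 256-token chunks.
nodes = node_parser.get_nodes_from_documents(documents)
print(len(nodes))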
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-redis')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-readers-google')
get_ipython().system('docker run -d --name redis-stack -p 6379:6379 -p 8001:8001 redis/redis-stack:latest')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.ingestion import (
DocstoreStrategy,
IngestionPipeline,
IngestionCache,
)
from llama_index.core.ingestion.cache import RedisCache
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.core.node_parser import SentenceSplitter
from llama_index.vector_stores.redis import RedisVectorStore
vector_store = RedisVectorStore(
index_name="redis_vector_store",
index_prefix="vectore_store",
redis_url="redis://localhost:6379",
)
cache = IngestionCache(
    cache=RedisCache.from_host_and_port("localhost", 6379),
)
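# Hedged continuation: a pipeline with upserts enabled, matching the imports
# above; the docstore namespace and embedding model are assumptions.
pipeline = IngestionPipeline(
    transformations=[
        SentenceSplitter(),
        HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5"),
    ],
    docstore=RedisDocumentStore.from_host_and_port(
        "localhost", 6379, namespace="document_store"
    ),
    vector_store=vector_store,
    cache=cache,
    docstore_strategy=DocstoreStrategy.UPSERTS,
)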
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-awadb')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
)
from IPython.display import Markdown, display
import openai
openai.api_key = ""
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.vector_stores.awadb import AwaDBVectorStore
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
vector_store = AwaDBVectorStore()
storage_context = | StorageContext.from_defaults(vector_store=vector_store) | llama_index.core.StorageContext.from_defaults |
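# Hedged sketch: index the essay into AwaDB with the local BGE embeddings and query it.
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context, embed_model=embed_model
)
query_engine = index.as_query_engine()
print(query_engine.query("What did the author work on before college?"))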
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
import tiktoken
from llama_index.core.callbacks import CallbackManager, TokenCountingHandler
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode
)
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.2)
Settings.callback_manager = CallbackManager([token_counter])
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
documents = | SimpleDirectoryReader("./data/paul_graham") | llama_index.core.SimpleDirectoryReader |
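# Hedged sketch: load the documents, build an index, and read the embedding token
# count recorded by the TokenCountingHandler configured above.
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents.load_data())
print(token_counter.total_embedding_token_count)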
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import nest_asyncio
nest_asyncio.apply()
from llama_index.core import SimpleDirectoryReader, get_response_synthesizer
from llama_index.core import DocumentSummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.core.node_parser import SentenceSplitter
wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston"]
from pathlib import Path
import requests
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
data_path = Path("data")
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
city_docs = []
for wiki_title in wiki_titles:
docs = SimpleDirectoryReader(
input_files=[f"data/{wiki_title}.txt"]
).load_data()
docs[0].doc_id = wiki_title
city_docs.extend(docs)
chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo")
splitter = SentenceSplitter(chunk_size=1024)
response_synthesizer = get_response_synthesizer(
response_mode="tree_summarize", use_async=True
)
doc_summary_index = DocumentSummaryIndex.from_documents(
city_docs,
llm=chatgpt,
transformations=[splitter],
response_synthesizer=response_synthesizer,
show_progress=True,
)
doc_summary_index.get_document_summary("Boston")
doc_summary_index.storage_context.persist("index")
from llama_index.core import load_index_from_storage
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults(persist_dir="index")
doc_summary_index = load_index_from_storage(storage_context)
query_engine = doc_summary_index.as_query_engine(
response_mode="tree_summarize", use_async=True
)
response = query_engine.query("What are the sports teams in Toronto?")
print(response)
from llama_index.core.indices.document_summary import (
DocumentSummaryIndexLLMRetriever,
)
retriever = DocumentSummaryIndexLLMRetriever(
doc_summary_index,
)
retrieved_nodes = retriever.retrieve("What are the sports teams in Toronto?")
print(len(retrieved_nodes))
print(retrieved_nodes[0].score)
print(retrieved_nodes[0].node.get_text())
from llama_index.core.query_engine import RetrieverQueryEngine
response_synthesizer = | get_response_synthesizer(response_mode="tree_summarize") | llama_index.core.get_response_synthesizer |
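# Hedged sketch: combine the document-summary retriever with the tree-summarize
# synthesizer in a RetrieverQueryEngine.
query_engine = RetrieverQueryEngine(
    retriever=retriever,
    response_synthesizer=response_synthesizer,
)
print(query_engine.query("What are the sports teams in Toronto?"))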
get_ipython().system('pip install llama-index')
from llama_index.core import VectorStoreIndex
from llama_index.core import SimpleDirectoryReader
import nest_asyncio
nest_asyncio.apply()
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = | SimpleDirectoryReader("./data/paul_graham/") | llama_index.core.SimpleDirectoryReader |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
download_loader,
RAKEKeywordTableIndex,
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'")
reader = | SimpleDirectoryReader(input_files=["./data/10k/lyft_2021.pdf"]) | llama_index.core.SimpleDirectoryReader |
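# Hedged sketch: load the Lyft 10-K and query it with the gpt-3.5-turbo LLM above.
docs = reader.load_data()
index = VectorStoreIndex.from_documents(docs)
query_engine = index.as_query_engine(llm=llm)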
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().system('pip install llama-index qdrant_client')
import qdrant_client
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
client = qdrant_client.QdrantClient(
location=":memory:"
)
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"author": "Harper Lee",
"theme": "Mafia",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
import os
from llama_index.core import StorageContext
os.environ["OPENAI_API_KEY"] = "sk-..."
vector_store = QdrantVectorStore(
client=client, collection_name="test_collection_1"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = | VectorStoreIndex(nodes, storage_context=storage_context) | llama_index.core.VectorStoreIndex |
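# Hedged sketch: filter retrieval over the Qdrant-backed index by node metadata.
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
filters = MetadataFilters(filters=[ExactMatchFilter(key="theme", value="Mafia")])
retriever = index.as_retriever(filters=filters)
print(retriever.retrieve("What is a story about crime families?"))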
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().system('pip install llama-index qdrant_client pyMuPDF tools frontend git+https://github.com/openai/CLIP.git easyocr')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.patches import Patch
import io
from PIL import Image, ImageDraw
import numpy as np
import csv
import pandas as pd
from torchvision import transforms
from transformers import AutoModelForObjectDetection
import torch
import openai
import os
import fitz
device = "cuda" if torch.cuda.is_available() else "cpu"
OPENAI_API_TOKEN = "sk-<your-openai-api-token>"
openai.api_key = OPENAI_API_TOKEN
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "llama2.pdf"')
pdf_file = "llama2.pdf"
output_directory_path, _ = os.path.splitext(pdf_file)
if not os.path.exists(output_directory_path):
os.makedirs(output_directory_path)
pdf_document = fitz.open(pdf_file)
for page_number in range(pdf_document.page_count):
page = pdf_document[page_number]
pix = page.get_pixmap()
image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
image.save(f"./{output_directory_path}/page_{page_number + 1}.png")
pdf_document.close()
from PIL import Image
import matplotlib.pyplot as plt
import os
image_paths = []
for img_path in os.listdir("./llama2"):
image_paths.append(str(os.path.join("./llama2", img_path)))
def plot_images(image_paths):
images_shown = 0
plt.figure(figsize=(16, 9))
for img_path in image_paths:
if os.path.isfile(img_path):
image = Image.open(img_path)
plt.subplot(3, 3, images_shown + 1)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
images_shown += 1
if images_shown >= 9:
break
plot_images(image_paths[9:12])
import qdrant_client
from llama_index.core import SimpleDirectoryReader
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.core.indices import MultiModalVectorStoreIndex
from llama_index.core.schema import ImageDocument
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.schema import ImageNode
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
openai_mm_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=1500
)
documents_images = SimpleDirectoryReader("./llama2/").load_data()
client = qdrant_client.QdrantClient(path="qdrant_index")
text_store = QdrantVectorStore(
client=client, collection_name="text_collection"
)
image_store = QdrantVectorStore(
client=client, collection_name="image_collection"
)
storage_context = StorageContext.from_defaults(
vector_store=text_store, image_store=image_store
)
index = MultiModalVectorStoreIndex.from_documents(
documents_images,
storage_context=storage_context,
)
retriever_engine = index.as_retriever(image_similarity_top_k=2)
from llama_index.core.indices.multi_modal.retriever import (
MultiModalVectorIndexRetriever,
)
query = "Compare llama2 with llama1?"
assert isinstance(retriever_engine, MultiModalVectorIndexRetriever)
retrieval_results = retriever_engine.text_to_image_retrieve(query)
retrieved_images = []
for res_node in retrieval_results:
if isinstance(res_node.node, ImageNode):
retrieved_images.append(res_node.node.metadata["file_path"])
else:
| display_source_node(res_node, source_length=200) | llama_index.core.response.notebook_utils.display_source_node |
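# Hedged sketch: show the retrieved report pages with the plot_images helper above.
plot_images(retrieved_images)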
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
from pathlib import Path
import nest_asyncio
nest_asyncio.apply()
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
pipeline = IngestionPipeline(
documents=docs,
transformations=[
HTMLNodeParser.from_defaults(),
SentenceSplitter(chunk_size=1024, chunk_overlap=200),
OpenAIEmbedding(),
],
)
eval_nodes = pipeline.run(documents=docs)
eval_llm = OpenAI(model="gpt-3.5-turbo")
dataset_generator = DatasetGenerator(
eval_nodes[:100],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=3,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100)
len(eval_dataset.qr_pairs)
eval_dataset.save_json("data/tesla10k_eval_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/tesla10k_eval_dataset.json"
)
eval_qs = eval_dataset.questions
qr_pairs = eval_dataset.qr_pairs
ref_response_strs = [r for (_, r) in qr_pairs]
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
)
from llama_index.core.evaluation.eval_utils import (
get_responses,
get_results_df,
)
from llama_index.core.evaluation import BatchEvalRunner
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm)
evaluator_dict = {
"correctness": evaluator_c,
"semantic_similarity": evaluator_s,
}
batch_eval_runner = BatchEvalRunner(
evaluator_dict, workers=2, show_progress=True
)
from llama_index.core import VectorStoreIndex
async def run_evals(
pipeline, batch_eval_runner, docs, eval_qs, eval_responses_ref
):
nodes = pipeline.run(documents=docs)
vector_index = VectorStoreIndex(nodes)
query_engine = vector_index.as_query_engine()
pred_responses = get_responses(eval_qs, query_engine, show_progress=True)
eval_results = await batch_eval_runner.aevaluate_responses(
eval_qs, responses=pred_responses, reference=eval_responses_ref
)
return eval_results
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
sent_parser_o0 = SentenceSplitter(chunk_size=1024, chunk_overlap=0)
sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
sent_parser_o500 = SentenceSplitter(chunk_size=1024, chunk_overlap=500)
html_parser = HTMLNodeParser.from_defaults()
parser_dict = {
"sent_parser_o0": sent_parser_o0,
"sent_parser_o200": sent_parser_o200,
"sent_parser_o500": sent_parser_o500,
}
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.ingestion import IngestionPipeline
pipeline_dict = {}
for k, parser in parser_dict.items():
pipeline = IngestionPipeline(
documents=docs,
transformations=[
html_parser,
parser,
OpenAIEmbedding(),
],
)
pipeline_dict[k] = pipeline
eval_results_dict = {}
for k, pipeline in pipeline_dict.items():
eval_results = await run_evals(
pipeline, batch_eval_runner, docs, eval_qs, ref_response_strs
)
eval_results_dict[k] = eval_results
import pickle
pickle.dump(eval_results_dict, open("eval_results_1.pkl", "wb"))
eval_results_list = list(eval_results_dict.items())
results_df = get_results_df(
[v for _, v in eval_results_list],
[k for k, _ in eval_results_list],
["correctness", "semantic_similarity"],
)
display(results_df)
for k, pipeline in pipeline_dict.items():
pipeline.cache.persist(f"./cache/{k}.json")
from llama_index.core.extractors import (
TitleExtractor,
QuestionsAnsweredExtractor,
SummaryExtractor,
)
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
extractor_dict = {
"summary": | SummaryExtractor(in_place=False) | llama_index.core.extractors.SummaryExtractor |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
import json
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import MetadataMode
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'")
TRAIN_FILES = ["./data/10k/lyft_2021.pdf"]
VAL_FILES = ["./data/10k/uber_2021.pdf"]
TRAIN_CORPUS_FPATH = "./data/train_corpus.json"
VAL_CORPUS_FPATH = "./data/val_corpus.json"
def load_corpus(files, verbose=False):
if verbose:
print(f"Loading files {files}")
reader = SimpleDirectoryReader(input_files=files)
docs = reader.load_data()
if verbose:
print(f"Loaded {len(docs)} docs")
parser = SentenceSplitter()
nodes = parser.get_nodes_from_documents(docs, show_progress=verbose)
if verbose:
print(f"Parsed {len(nodes)} nodes")
return nodes
train_nodes = load_corpus(TRAIN_FILES, verbose=True)
val_nodes = load_corpus(VAL_FILES, verbose=True)
from llama_index.finetuning import generate_qa_embedding_pairs
from llama_index.core.evaluation import EmbeddingQAFinetuneDataset
import os
OPENAI_API_TOKEN = "sk-"
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
from llama_index.llms.openai import OpenAI
train_dataset = generate_qa_embedding_pairs(
llm= | OpenAI(model="gpt-3.5-turbo") | llama_index.llms.openai.OpenAI |
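# Hedged sketch: finish generating QA pairs for both splits and persist them
# (signature per llama-index's finetuning utilities).
train_dataset = generate_qa_embedding_pairs(
    llm=OpenAI(model="gpt-3.5-turbo"), nodes=train_nodes
)
val_dataset = generate_qa_embedding_pairs(
    llm=OpenAI(model="gpt-3.5-turbo"), nodes=val_nodes
)
train_dataset.save_json("train_dataset.json")
val_dataset.save_json("val_dataset.json")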
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt' -O pg_essay.txt")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader(input_files=["pg_essay.txt"])
documents = reader.load_data()
from llama_index.core.query_pipeline import QueryPipeline, InputComponent
from typing import Dict, Any, List, Optional
from llama_index.llms.openai import OpenAI
from llama_index.core import Document, VectorStoreIndex
from llama_index.core import SummaryIndex
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.schema import NodeWithScore, TextNode
from llama_index.core import PromptTemplate
from llama_index.core.selectors import LLMSingleSelector
hyde_str = """\
Please write a passage to answer the question: {query_str}
Try to include as many key details as possible.
Passage: """
hyde_prompt = PromptTemplate(hyde_str)
llm = OpenAI(model="gpt-3.5-turbo")
summarizer = TreeSummarize(llm=llm)
vector_index = VectorStoreIndex.from_documents(documents)
vector_query_engine = vector_index.as_query_engine(similarity_top_k=2)
summary_index = SummaryIndex.from_documents(documents)
summary_qrewrite_str = """\
Here's a question:
{query_str}
You are responsible for feeding the question to an agent that, given context, will try to answer it.
The context may or may not be relevant. Rewrite the question to highlight the fact that
only some pieces of context (or none) may be relevant.
"""
summary_qrewrite_prompt = PromptTemplate(summary_qrewrite_str)
summary_query_engine = summary_index.as_query_engine()
selector = LLMSingleSelector.from_defaults()
from llama_index.core.query_pipeline import RouterComponent
vector_chain = QueryPipeline(chain=[vector_query_engine])
summary_chain = QueryPipeline(
chain=[summary_qrewrite_prompt, llm, summary_query_engine], verbose=True
)
choices = [
"This tool answers specific questions about the document (not summary questions across the document)",
"This tool answers summary questions about the document (not specific questions)",
]
router_c = RouterComponent(
selector=selector,
choices=choices,
components=[vector_chain, summary_chain],
verbose=True,
)
qp = | QueryPipeline(chain=[router_c], verbose=True) | llama_index.core.query_pipeline.QueryPipeline |
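# Hedged sketch: exercise both routes of the router pipeline, one specific
# question and one summary question.
response = qp.run("What did the author do during his time at RISD?")
print(str(response))
response = qp.run("Give me a summary of the whole essay.")
print(str(response))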
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-azure-openai')
get_ipython().system('pip install openai')
import os
os.environ["AZURE_OPENAI_API_KEY"] = "<your-api-key>"
os.environ[
"AZURE_OPENAI_ENDPOINT"
] = "https://<your-resource-name>.openai.azure.com/"
os.environ["OPENAI_API_VERSION"] = "2023-12-01-preview"
from llama_index.multi_modal_llms.azure_openai import AzureOpenAIMultiModal
azure_openai_mm_llm = AzureOpenAIMultiModal(
engine="gpt-4-vision-preview",
api_version="2023-12-01-preview",
model="gpt-4-vision-preview",
max_new_tokens=300,
)
azure_openai_mm_llm = AzureOpenAIMultiModal(
azure_endpoint="https://<your-endpoint>.openai.azure.com",
engine="gpt-4-vision-preview",
api_version="2023-12-01-preview",
model="gpt-4-vision-preview",
max_new_tokens=300,
)
import base64
import requests
from llama_index.core.schema import ImageDocument
image_url = "https://www.visualcapitalist.com/wp-content/uploads/2023/10/US_Mortgage_Rate_Surge-Sept-11-1.jpg"
response = requests.get(image_url)
if response.status_code != 200:
raise ValueError("Error: Could not retrieve image from URL.")
base64str = base64.b64encode(response.content).decode("utf-8")
image_document = | ImageDocument(image=base64str, image_mimetype="image/jpeg") | llama_index.core.schema.ImageDocument |
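# Hedged sketch: describe the downloaded chart with the Azure OpenAI multi-modal LLM.
response = azure_openai_mm_llm.complete(
    prompt="Describe the image as an alternative text",
    image_documents=[image_document],
)
print(response)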
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
import nest_asyncio
nest_asyncio.apply()
from llama_index.embeddings.huggingface import (
HuggingFaceEmbedding,
HuggingFaceInferenceAPIEmbedding,
)
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
model_name = "jinaai/jina-embeddings-v2-small-en"
embed_model = HuggingFaceEmbedding(
model_name=model_name, trust_remote_code=True
)
Settings.embed_model = embed_model
Settings.chunk_size = 1024
embed_model_base = OpenAIEmbedding()
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
reader = SimpleDirectoryReader("../data/paul_graham")
docs = reader.load_data()
index_jina = | VectorStoreIndex.from_documents(docs, embed_model=embed_model) | llama_index.core.VectorStoreIndex.from_documents |
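# Hedged sketch: build a second index with the OpenAI baseline embeddings for comparison.
index_base = VectorStoreIndex.from_documents(docs, embed_model=embed_model_base)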
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface')
import nest_asyncio
nest_asyncio.apply()
import os
HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
import pandas as pd
def display_eval_df(question, source, answer_a, answer_b, result) -> None:
"""Pretty print question/answer + gpt-4 judgement dataset."""
eval_df = pd.DataFrame(
{
"Question": question,
"Source": source,
"Model A": answer_a["model"],
"Answer A": answer_a["text"],
"Model B": answer_b["model"],
"Answer B": answer_b["text"],
"Score": result.score,
"Judgement": result.feedback,
},
index=[0],
)
eval_df = eval_df.style.set_properties(
**{
"inline-size": "300px",
"overflow-wrap": "break-word",
},
subset=["Answer A", "Answer B"]
)
display(eval_df)
get_ipython().system('pip install wikipedia -q')
from llama_index.readers.wikipedia import WikipediaReader
train_cities = [
"San Francisco",
"Toronto",
"New York",
"Vancouver",
"Montreal",
"Boston",
]
test_cities = [
"Tokyo",
"Singapore",
"Paris",
]
train_documents = | WikipediaReader() | llama_index.readers.wikipedia.WikipediaReader |
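# Hedged sketch completing the truncated loads: pull pages for each city split.
train_documents = WikipediaReader().load_data(pages=train_cities)
test_documents = WikipediaReader().load_data(pages=test_cities)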
get_ipython().run_line_magic('pip', 'install llama-index-llms-fireworks')
get_ipython().run_line_magic('pip', 'install llama-index')
from llama_index.llms.fireworks import Fireworks
resp = Fireworks().complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.fireworks import Fireworks
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = Fireworks().chat(messages)
print(resp)
from llama_index.llms.fireworks import Fireworks
llm = | Fireworks() | llama_index.llms.fireworks.Fireworks |
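# Hedged sketch: stream a completion from the Fireworks LLM.
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
    print(r.delta, end="")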
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "./llama2.pdf"')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/1706.03762.pdf" -O "./attention.pdf"')
from llama_index.core import download_loader
from llama_index.readers.file import PyMuPDFReader
llama2_docs = PyMuPDFReader().load_data(
file_path="./llama2.pdf", metadata=True
)
attention_docs = PyMuPDFReader().load_data(
file_path="./attention.pdf", metadata=True
)
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.core.node_parser import TokenTextSplitter
nodes = TokenTextSplitter(
chunk_size=1024, chunk_overlap=128
).get_nodes_from_documents(llama2_docs + attention_docs)
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.storage.docstore.firestore import FirestoreDocumentStore
from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore
docstore = SimpleDocumentStore()
docstore.add_documents(nodes)
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
client = QdrantClient(path="./qdrant_data")
vector_store = QdrantVectorStore("composable", client=client)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes=nodes, storage_context=storage_context)
vector_retriever = index.as_retriever(similarity_top_k=2)
bm25_retriever = BM25Retriever.from_defaults(
docstore=docstore, similarity_top_k=2
)
from llama_index.core.schema import IndexNode
vector_obj = IndexNode(
index_id="vector", obj=vector_retriever, text="Vector Retriever"
)
bm25_obj = IndexNode(
index_id="bm25", obj=bm25_retriever, text="BM25 Retriever"
)
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(objects=[vector_obj, bm25_obj])
query_engine = summary_index.as_query_engine(
response_mode="tree_summarize", verbose=True
)
response = await query_engine.aquery(
"How does attention work in transformers?"
)
print(str(response))
response = await query_engine.aquery(
"What is the architecture of Llama2 based on?"
)
print(str(response))
response = await query_engine.aquery(
"What was used before attention in transformers?"
)
print(str(response))
docstore.persist("./docstore.json")
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
docstore = SimpleDocumentStore.from_persist_path("./docstore.json")
client = QdrantClient(path="./qdrant_data")
vector_store = QdrantVectorStore("composable", client=client)
index = VectorStoreIndex.from_vector_store(vector_store)
vector_retriever = index.as_retriever(similarity_top_k=2)
bm25_retriever = BM25Retriever.from_defaults(
docstore=docstore, similarity_top_k=2
)
from llama_index.core.schema import IndexNode
vector_obj = IndexNode(
index_id="vector", obj=vector_retriever, text="Vector Retriever"
)
bm25_obj = IndexNode(
index_id="bm25", obj=bm25_retriever, text="BM25 Retriever"
)
from llama_index.core import SummaryIndex
summary_index = | SummaryIndex(objects=[vector_obj, bm25_obj]) | llama_index.core.SummaryIndex |
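# Hedged sketch: rebuild the composable query engine from the reloaded retrievers,
# mirroring the query-engine setup earlier in this example.
query_engine = summary_index.as_query_engine(
    response_mode="tree_summarize", verbose=True
)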
get_ipython().run_line_magic('pip', 'install llama-index-readers-elasticsearch')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-opensearch')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-ollama')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from os import getenv
from llama_index.core import SimpleDirectoryReader
from llama_index.vector_stores.opensearch import (
OpensearchVectorStore,
OpensearchVectorClient,
)
from llama_index.core import VectorStoreIndex, StorageContext
endpoint = getenv("OPENSEARCH_ENDPOINT", "http://localhost:9200")
idx = getenv("OPENSEARCH_INDEX", "gpt-index-demo")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
text_field = "content"
embedding_field = "embedding"
client = OpensearchVectorClient(
endpoint, idx, 1536, embedding_field=embedding_field, text_field=text_field
)
vector_store = OpensearchVectorStore(client)
storage_context = | StorageContext.from_defaults(vector_store=vector_store) | llama_index.core.StorageContext.from_defaults |
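# Hedged sketch: index the essay into OpenSearch and run a sample query.
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
query_engine = index.as_query_engine()
print(query_engine.query("What did the author study?"))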
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.postprocessor import LLMRerank
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core import Settings
from llama_index.packs.koda_retriever import KodaRetriever
from llama_index.core.evaluation import RetrieverEvaluator
from llama_index.core import SimpleDirectoryReader
import os
from pinecone import Pinecone
from llama_index.core.node_parser import SemanticSplitterNodeParser
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.evaluation import generate_qa_embedding_pairs
import pandas as pd
pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY"))
index = pc.Index("llama2-paper") # this was previously created in my pinecone account
Settings.llm = OpenAI()
Settings.embed_model = OpenAIEmbedding()
vector_store = | PineconeVectorStore(pinecone_index=index) | llama_index.vector_stores.pinecone.PineconeVectorStore |
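# Hedged sketch (constructor arguments per the koda_retriever pack's README;
# treat them as assumptions): wrap the Pinecone store and build the retriever.
vector_index = VectorStoreIndex.from_vector_store(
    vector_store, embed_model=Settings.embed_model
)
retriever = KodaRetriever(index=vector_index, llm=Settings.llm, verbose=True)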
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core.agent import (
CustomSimpleAgentWorker,
Task,
AgentChatResponse,
)
from typing import Dict, Any, List, Tuple, Optional
from llama_index.core.tools import BaseTool, QueryEngineTool
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.query_engine import RouterQueryEngine
from llama_index.core import ChatPromptTemplate, PromptTemplate
from llama_index.core.selectors import PydanticSingleSelector
from llama_index.core.bridge.pydantic import Field, BaseModel
from llama_index.core.llms import ChatMessage, MessageRole
DEFAULT_PROMPT_STR = """
Given previous question/response pairs, please determine if an error has occurred in the response, and suggest \
a modified question that will not trigger the error.
Examples of modified questions:
- The question itself is modified to elicit a non-erroneous response
- The question is augmented with context that will help the downstream system better answer the question.
- The question is augmented with examples of negative responses, or other negative questions.
An error means that either an exception has triggered, or the response is completely irrelevant to the question.
Please return the evaluation of the response in the following JSON format.
"""
def get_chat_prompt_template(
system_prompt: str, current_reasoning: Tuple[str, str]
) -> ChatPromptTemplate:
system_msg = ChatMessage(role=MessageRole.SYSTEM, content=system_prompt)
messages = [system_msg]
for raw_msg in current_reasoning:
if raw_msg[0] == "user":
messages.append(
ChatMessage(role=MessageRole.USER, content=raw_msg[1])
)
else:
messages.append(
ChatMessage(role=MessageRole.ASSISTANT, content=raw_msg[1])
)
return ChatPromptTemplate(message_templates=messages)
class ResponseEval(BaseModel):
"""Evaluation of whether the response has an error."""
has_error: bool = Field(
..., description="Whether the response has an error."
)
new_question: str = Field(..., description="The suggested new question.")
explanation: str = Field(
...,
description=(
"The explanation for the error as well as for the new question."
"Can include the direct stack trace as well."
),
)
from llama_index.core.bridge.pydantic import PrivateAttr
class RetryAgentWorker(CustomSimpleAgentWorker):
"""Agent worker that adds a retry layer on top of a router.
Continues iterating until there's no errors / task is done.
"""
prompt_str: str = Field(default=DEFAULT_PROMPT_STR)
max_iterations: int = Field(default=10)
_router_query_engine: RouterQueryEngine = | PrivateAttr() | llama_index.core.bridge.pydantic.PrivateAttr |
get_ipython().system('pip install llama-index')
from pydantic import BaseModel
from typing import List
from llama_index.core.program import LLMTextCompletionProgram
class Song(BaseModel):
"""Data model for a song."""
title: str
length_seconds: int
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
songs: List[Song]
from llama_index.core.program import LLMTextCompletionProgram
prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
"""
program = LLMTextCompletionProgram.from_defaults(
output_cls=Album,
prompt_template_str=prompt_template_str,
verbose=True,
)
output = program(movie_name="The Shining")
output
from llama_index.core.output_parsers import PydanticOutputParser
program = LLMTextCompletionProgram.from_defaults(
output_parser= | PydanticOutputParser(output_cls=Album) | llama_index.core.output_parsers.PydanticOutputParser |
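# Hedged sketch completing the truncated call, mirroring the program defined above.
program = LLMTextCompletionProgram.from_defaults(
    output_parser=PydanticOutputParser(output_cls=Album),
    prompt_template_str=prompt_template_str,
    verbose=True,
)
output = program(movie_name="The Shining")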
get_ipython().run_line_magic('pip', 'install llama-index-readers-dashvector')
get_ipython().system('pip install llama-index')
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
api_key = os.environ["DASHVECTOR_API_KEY"]
from llama_index.readers.dashvector import DashVectorReader
reader = | DashVectorReader(api_key=api_key) | llama_index.readers.dashvector.DashVectorReader |
get_ipython().system('pip install llama-index-llms-ollama')
get_ipython().system('pip install llama-index')
from llama_index.llms.ollama import Ollama
gemma_2b = Ollama(model="gemma:2b", request_timeout=30.0)
gemma_7b = Ollama(model="gemma:7b", request_timeout=30.0)
resp = gemma_2b.complete("Who is Paul Graham?")
print(resp)
resp = gemma_7b.complete("Who is Paul Graham?")
print(resp)
resp = gemma_2b.complete("Who is owning Tesla?")
print(resp)
resp = gemma_7b.complete("Who is owning Tesla?")
print(resp)
from llama_index.core.llms import ChatMessage
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = gemma_7b.chat(messages)
print(resp)
response = gemma_7b.stream_complete("Who is Paul Graham?")
for r in response:
print(r.delta, end="")
from llama_index.core.llms import ChatMessage
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
| ChatMessage(role="user", content="What is your name") | llama_index.core.llms.ChatMessage |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader("../data/paul_graham")
docs = reader.load_data()
import os
from llama_index.core import (
StorageContext,
VectorStoreIndex,
load_index_from_storage,
)
if not os.path.exists("storage"):
index = VectorStoreIndex.from_documents(docs)
index.set_index_id("vector_index")
index.storage_context.persist("./storage")
else:
storage_context = StorageContext.from_defaults(persist_dir="storage")
index = load_index_from_storage(storage_context, index_id="vector_index")
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core import PromptTemplate
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
p = QueryPipeline(chain=[prompt_tmpl, llm], verbose=True)
output = p.run(movie_name="The Departed")
print(str(output))
from typing import List
from pydantic import BaseModel, Field
from llama_index.core.output_parsers import PydanticOutputParser
class Movie(BaseModel):
"""Object representing a single movie."""
name: str = Field(..., description="Name of the movie.")
year: int = Field(..., description="Year of the movie.")
class Movies(BaseModel):
"""Object representing a list of movies."""
movies: List[Movie] = Field(..., description="List of movies.")
llm = OpenAI(model="gpt-3.5-turbo")
output_parser = PydanticOutputParser(Movies)
json_prompt_str = """\
Please generate related movies to {movie_name}. Output with the following JSON format:
"""
json_prompt_str = output_parser.format(json_prompt_str)
json_prompt_tmpl = PromptTemplate(json_prompt_str)
p = QueryPipeline(chain=[json_prompt_tmpl, llm, output_parser], verbose=True)
output = p.run(movie_name="Toy Story")
output
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
prompt_str2 = """\
Here's some text:
{text}
Can you rewrite this with a summary of each movie?
"""
prompt_tmpl2 = PromptTemplate(prompt_str2)
llm = OpenAI(model="gpt-3.5-turbo")
llm_c = llm.as_query_component(streaming=True)
p = QueryPipeline(
chain=[prompt_tmpl, llm_c, prompt_tmpl2, llm_c], verbose=True
)
output = p.run(movie_name="The Dark Knight")
for o in output:
print(o.delta, end="")
p = QueryPipeline(
chain=[
json_prompt_tmpl,
llm.as_query_component(streaming=True),
output_parser,
],
verbose=True,
)
output = p.run(movie_name="Toy Story")
print(output)
from llama_index.postprocessor.cohere_rerank import CohereRerank
prompt_str1 = "Please generate a concise question about Paul Graham's life regarding the following topic {topic}"
prompt_tmpl1 = PromptTemplate(prompt_str1)
prompt_str2 = (
"Please write a passage to answer the question\n"
"Try to include as many key details as possible.\n"
"\n"
"\n"
"{query_str}\n"
"\n"
"\n"
'Passage:"""\n'
)
prompt_tmpl2 = PromptTemplate(prompt_str2)
llm = OpenAI(model="gpt-3.5-turbo")
retriever = index.as_retriever(similarity_top_k=5)
p = QueryPipeline(
chain=[prompt_tmpl1, llm, prompt_tmpl2, llm, retriever], verbose=True
)
nodes = p.run(topic="college")
len(nodes)
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.response_synthesizers import TreeSummarize
prompt_str = "Please generate a question about Paul Graham's life regarding the following topic {topic}"
prompt_tmpl = PromptTemplate(prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
retriever = index.as_retriever(similarity_top_k=3)
reranker = CohereRerank()
summarizer = TreeSummarize(llm=llm)
p = QueryPipeline(verbose=True)
p.add_modules(
{
"llm": llm,
"prompt_tmpl": prompt_tmpl,
"retriever": retriever,
"summarizer": summarizer,
"reranker": reranker,
}
)
p.add_link("prompt_tmpl", "llm")
p.add_link("llm", "retriever")
p.add_link("retriever", "reranker", dest_key="nodes")
p.add_link("llm", "reranker", dest_key="query_str")
p.add_link("reranker", "summarizer", dest_key="nodes")
p.add_link("llm", "summarizer", dest_key="query_str")
print(summarizer.as_query_component().input_keys)
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(p.dag)
net.show("rag_dag.html")
response = p.run(topic="YC")
print(str(response))
response = await p.arun(topic="YC")
print(str(response))
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.query_pipeline import InputComponent
retriever = index.as_retriever(similarity_top_k=5)
summarizer = TreeSummarize(llm= | OpenAI(model="gpt-3.5-turbo") | llama_index.llms.openai.OpenAI |
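# Hedged sketch completing the truncated setup: route the raw input straight to
# retrieval and summarization via an InputComponent (pattern from the query
# pipeline docs).
summarizer = TreeSummarize(llm=OpenAI(model="gpt-3.5-turbo"))
p = QueryPipeline(verbose=True)
p.add_modules(
    {
        "input": InputComponent(),
        "retriever": retriever,
        "summarizer": summarizer,
    }
)
p.add_link("input", "retriever")
p.add_link("input", "summarizer", dest_key="query_str")
p.add_link("retriever", "summarizer", dest_key="nodes")
output = p.run(input="what did the author do in YC")
print(str(output))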
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
llm = OpenAI(model="gpt-4")
splitter = SentenceSplitter(chunk_size=1024)
nodes = splitter.get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(
nodes=nodes,
storage_context=storage_context,
)
retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
from llama_index.core.response.notebook_utils import display_source_node
nodes = retriever.retrieve("What happened at Viaweb and Interleaf?")
for node in nodes:
display_source_node(node)
nodes = retriever.retrieve("What did Paul Graham do after RISD?")
for node in nodes:
display_source_node(node)
from llama_index.core.tools import RetrieverTool
vector_retriever = | VectorIndexRetriever(index) | llama_index.core.retrievers.VectorIndexRetriever |
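# Hedged sketch: expose the vector and BM25 retrievers as tools, matching the
# RetrieverTool import above.
retriever_tools = [
    RetrieverTool.from_defaults(
        retriever=vector_retriever,
        description="Useful in most cases",
    ),
    RetrieverTool.from_defaults(
        retriever=retriever,
        description="Useful if searching about specific information",
    ),
]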
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import json
from typing import Sequence, List
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
import nest_asyncio
nest_asyncio.apply()
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
class YourOpenAIAgent:
def __init__(
self,
tools: Sequence[BaseTool] = [],
llm: OpenAI = OpenAI(temperature=0, model="gpt-3.5-turbo-0613"),
chat_history: List[ChatMessage] = [],
) -> None:
self._llm = llm
self._tools = {tool.metadata.name: tool for tool in tools}
self._chat_history = chat_history
def reset(self) -> None:
self._chat_history = []
def chat(self, message: str) -> str:
chat_history = self._chat_history
chat_history.append( | ChatMessage(role="user", content=message) | llama_index.core.llms.ChatMessage |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm')
get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git')
get_ipython().run_line_magic('pip', 'install torch torchvision')
get_ipython().run_line_magic('pip', 'install matplotlib scikit-image')
get_ipython().run_line_magic('pip', 'install -U qdrant_client')
from pathlib import Path
import requests
wiki_titles = [
"batman",
"Vincent van Gogh",
"San Francisco",
"iPhone",
"Tesla Model S",
"BTS",
]
data_path = Path("data_wiki")
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
import wikipedia
import urllib.request
image_path = Path("data_wiki")
image_uuid = 0
image_metadata_dict = {}
MAX_IMAGES_PER_WIKI = 30
wiki_titles = [
"San Francisco",
"Batman",
"Vincent van Gogh",
"iPhone",
"Tesla Model S",
"BTS band",
]
if not image_path.exists():
Path.mkdir(image_path)
for title in wiki_titles:
images_per_wiki = 0
print(title)
try:
page_py = wikipedia.page(title)
list_img_urls = page_py.images
for url in list_img_urls:
if url.endswith(".jpg") or url.endswith(".png"):
image_uuid += 1
image_file_name = title + "_" + url.split("/")[-1]
image_metadata_dict[image_uuid] = {
"filename": image_file_name,
"img_path": "./" + str(image_path / f"{image_uuid}.jpg"),
}
urllib.request.urlretrieve(
url, image_path / f"{image_uuid}.jpg"
)
images_per_wiki += 1
if images_per_wiki > MAX_IMAGES_PER_WIKI:
break
    except Exception:
        print(f"No images found for Wikipedia page: {title}")
continue
import os
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
import qdrant_client
from llama_index.core import SimpleDirectoryReader
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.core.indices import MultiModalVectorStoreIndex
client = qdrant_client.QdrantClient(path="qdrant_db")
text_store = QdrantVectorStore(
client=client, collection_name="text_collection"
)
image_store = QdrantVectorStore(
client=client, collection_name="image_collection"
)
storage_context = StorageContext.from_defaults(
vector_store=text_store, image_store=image_store
)
documents = SimpleDirectoryReader("./data_wiki/").load_data()
index = MultiModalVectorStoreIndex.from_documents(
documents,
storage_context=storage_context,
)
from PIL import Image
import matplotlib.pyplot as plt
import os
def plot_images(image_metadata_dict):
original_images_urls = []
images_shown = 0
for image_id in image_metadata_dict:
img_path = image_metadata_dict[image_id]["img_path"]
if os.path.isfile(img_path):
filename = image_metadata_dict[image_id]["filename"]
image = Image.open(img_path).convert("RGB")
plt.subplot(8, 8, len(original_images_urls) + 1)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
original_images_urls.append(filename)
images_shown += 1
if images_shown >= 64:
break
plt.tight_layout()
plot_images(image_metadata_dict)
def plot_images(image_paths):
images_shown = 0
plt.figure(figsize=(16, 9))
for img_path in image_paths:
if os.path.isfile(img_path):
image = Image.open(img_path)
plt.subplot(2, 3, images_shown + 1)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
images_shown += 1
if images_shown >= 9:
break
test_query = "who are BTS team members"
retriever = index.as_retriever(similarity_top_k=3, image_similarity_top_k=5)
retrieval_results = retriever.retrieve(test_query)
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.schema import ImageNode
retrieved_image = []
for res_node in retrieval_results:
if isinstance(res_node.node, ImageNode):
retrieved_image.append(res_node.node.metadata["file_path"])
else:
display_source_node(res_node, source_length=200)
plot_images(retrieved_image)
test_query = "what are Vincent van Gogh's famous paintings"
retriever = index.as_retriever(similarity_top_k=3, image_similarity_top_k=5)
retrieval_results = retriever.retrieve(test_query)
retrieved_image = []
for res_node in retrieval_results:
if isinstance(res_node.node, ImageNode):
retrieved_image.append(res_node.node.metadata["file_path"])
else:
display_source_node(res_node, source_length=200)
plot_images(retrieved_image)
test_query = "what is the popular tourist attraction in San Francisco"
retriever = index.as_retriever(similarity_top_k=3, image_similarity_top_k=5)
retrieval_results = retriever.retrieve(test_query)
retrieved_image = []
for res_node in retrieval_results:
if isinstance(res_node.node, ImageNode):
retrieved_image.append(res_node.node.metadata["file_path"])
else:
display_source_node(res_node, source_length=200)
plot_images(retrieved_image)
test_query = "which company makes Tesla"
retriever = index.as_retriever(similarity_top_k=3, image_similarity_top_k=5)
retrieval_results = retriever.retrieve(test_query)
retrieved_image = []
for res_node in retrieval_results:
if isinstance(res_node.node, ImageNode):
retrieved_image.append(res_node.node.metadata["file_path"])
else:
display_source_node(res_node, source_length=200)
plot_images(retrieved_image)
test_query = "what is the main character in Batman"
retriever = index.as_retriever(similarity_top_k=3, image_similarity_top_k=5)
retrieval_results = retriever.retrieve(test_query)
retrieved_image = []
for res_node in retrieval_results:
if isinstance(res_node.node, ImageNode):
retrieved_image.append(res_node.node.metadata["file_path"])
else:
| display_source_node(res_node, source_length=200) | llama_index.core.response.notebook_utils.display_source_node |
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
)
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = | VectorStoreIndex.from_documents(documents) | llama_index.core.VectorStoreIndex.from_documents |
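# Hedged sketch: persist the index and reload it, matching the imports above.
index.set_index_id("vector_index")
index.storage_context.persist("./storage")
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context, index_id="vector_index")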
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
get_ipython().system('pip install "llama_index>=0.9.7"')
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.extractors import TitleExtractor, SummaryExtractor
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import MetadataMode
def build_pipeline():
llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1)
transformations = [
SentenceSplitter(chunk_size=1024, chunk_overlap=20),
TitleExtractor(
llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8
),
SummaryExtractor(
llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8
),
OpenAIEmbedding(),
]
return IngestionPipeline(transformations=transformations)
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
import time
times = []
for _ in range(3):
time.sleep(30) # help prevent rate-limits/timeouts, keeps each run fair
    pipeline = build_pipeline()
    start = time.time()
    nodes = await pipeline.arun(documents=documents)
end = time.time()
times.append(end - start)
print(f"Average time: {sum(times) / len(times)}")
get_ipython().system('pip install "llama_index<0.9.6"')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.extractors import TitleExtractor, SummaryExtractor
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import MetadataMode
def build_pipeline():
llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1)
transformations = [
SentenceSplitter(chunk_size=1024, chunk_overlap=20),
TitleExtractor(llm=llm, metadata_mode=MetadataMode.EMBED),
SummaryExtractor(llm=llm, metadata_mode=MetadataMode.EMBED),
| OpenAIEmbedding() | llama_index.embeddings.openai.OpenAIEmbedding |
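        # hedged sketch: close the truncated transformations list and return the
        # pipeline, mirroring the first build_pipeline definition above
    ]
    return IngestionPipeline(transformations=transformations)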
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().system('pip install llama-index')
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from llama_index.core.tools import QueryEngineTool, ToolMetadata
try:
storage_context = StorageContext.from_defaults(
persist_dir="./storage/lyft"
)
lyft_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/uber"
)
uber_index = load_index_from_storage(storage_context)
index_loaded = True
except:
index_loaded = False
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'")
if not index_loaded:
lyft_docs = SimpleDirectoryReader(
input_files=["./data/10k/lyft_2021.pdf"]
).load_data()
uber_docs = SimpleDirectoryReader(
input_files=["./data/10k/uber_2021.pdf"]
).load_data()
lyft_index = VectorStoreIndex.from_documents(lyft_docs)
uber_index = | VectorStoreIndex.from_documents(uber_docs) | llama_index.core.VectorStoreIndex.from_documents |
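# Hedged sketch: persist both indices and wrap them as query-engine tools, per
# the QueryEngineTool/ToolMetadata imports above.
lyft_index.storage_context.persist(persist_dir="./storage/lyft")
uber_index.storage_context.persist(persist_dir="./storage/uber")
query_engine_tools = [
    QueryEngineTool(
        query_engine=lyft_index.as_query_engine(similarity_top_k=3),
        metadata=ToolMetadata(
            name="lyft_10k",
            description="Provides information about Lyft financials for year 2021",
        ),
    ),
    QueryEngineTool(
        query_engine=uber_index.as_query_engine(similarity_top_k=3),
        metadata=ToolMetadata(
            name="uber_10k",
            description="Provides information about Uber financials for year 2021",
        ),
    ),
]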
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-replicate')
import os
OPENAI_API_TOKEN = "sk-<your-openai-api-token>"
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
REPLICATE_API_TOKEN = "" # Your Relicate API token here
os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN
from pathlib import Path
input_image_path = Path("restaurant_images")
if not input_image_path.exists():
Path.mkdir(input_image_path)
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png')
from pydantic import BaseModel
class Restaurant(BaseModel):
"""Data model for an restaurant."""
restaurant: str
food: str
discount: str
price: str
rating: str
review: str
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core import SimpleDirectoryReader
image_documents = SimpleDirectoryReader("./restaurant_images").load_data()
openai_mm_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=1000
)
from PIL import Image
import matplotlib.pyplot as plt
imageUrl = "./restaurant_images/fried_chicken.png"
image = Image.open(imageUrl).convert("RGB")
plt.figure(figsize=(16, 5))
plt.imshow(image)
from llama_index.core.program import MultiModalLLMCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
prompt_template_str = """\
can you summarize what is in the image\
and return the answer with json format \
"""
openai_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Restaurant),
image_documents=image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=openai_mm_llm,
verbose=True,
)
response = openai_program()
for res in response:
print(res)
from llama_index.multi_modal_llms.replicate import ReplicateMultiModal
from llama_index.multi_modal_llms.replicate.base import (
REPLICATE_MULTI_MODAL_LLM_MODELS,
)
prompt_template_str = """\
can you summarize what is in the image\
and return the answer with json format \
"""
def pydantic_replicate(
model_name, output_class, image_documents, prompt_template_str
):
mm_llm = ReplicateMultiModal(
model=REPLICATE_MULTI_MODAL_LLM_MODELS[model_name],
temperature=0.1,
max_new_tokens=1000,
)
llm_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(output_class),
image_documents=image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=mm_llm,
verbose=True,
)
response = llm_program()
print(f"Model: {model_name}")
for res in response:
print(res)
pydantic_replicate("fuyu-8b", Restaurant, image_documents, prompt_template_str)
pydantic_replicate(
"llava-13b", Restaurant, image_documents, prompt_template_str
)
pydantic_replicate(
"minigpt-4", Restaurant, image_documents, prompt_template_str
)
pydantic_replicate("cogvlm", Restaurant, image_documents, prompt_template_str)
input_image_path = Path("amazon_images")
if not input_image_path.exists():
Path.mkdir(input_image_path)
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1p1Y1qAoM68eC4sAvvHaiJyPhdUZS0Gqb" -O ./amazon_images/amazon.png')
from pydantic import BaseModel
class Product(BaseModel):
"""Data model for a Amazon Product."""
title: str
category: str
discount: str
price: str
rating: str
review: str
description: str
inventory: str
imageUrl = "./amazon_images/amazon.png"
image = Image.open(imageUrl).convert("RGB")
plt.figure(figsize=(16, 5))
plt.imshow(image)
amazon_image_documents = SimpleDirectoryReader("./amazon_images").load_data()
prompt_template_str = """\
can you summarize what is in the image\
and return the answer with json format \
"""
openai_program_amazon = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Product),
image_documents=amazon_image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=openai_mm_llm,
verbose=True,
)
response = openai_program_amazon()
for res in response:
print(res)
pydantic_replicate(
"fuyu-8b", Product, amazon_image_documents, prompt_template_str
)
pydantic_replicate(
"minigpt-4", Product, amazon_image_documents, prompt_template_str
)
pydantic_replicate(
"cogvlm", Product, amazon_image_documents, prompt_template_str
)
pydantic_replicate(
"llava-13b", Product, amazon_image_documents, prompt_template_str
)
input_image_path = Path("instagram_images")
if not input_image_path.exists():
Path.mkdir(input_image_path)
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=12ZpBBFkYu-jzz1iz356U5kMikn4uN9ww" -O ./instagram_images/jordan.png')
from pydantic import BaseModel
class InsAds(BaseModel):
"""Data model for a Ins Ads."""
account: str
brand: str
product: str
category: str
discount: str
price: str
comments: str
review: str
description: str
from PIL import Image
import matplotlib.pyplot as plt
imageUrl = "./instagram_images/jordan.png"
image = Image.open(imageUrl).convert("RGB")
plt.figure(figsize=(16, 5))
plt.imshow(image)
ins_image_documents = SimpleDirectoryReader("./instagram_images").load_data()
prompt_template_str = """\
can you summarize what is in the image\
and return the answer with json format \
"""
openai_program_ins = MultiModalLLMCompletionProgram.from_defaults(
    output_parser=PydanticOutputParser(InsAds),
    image_documents=ins_image_documents,
    prompt_template_str=prompt_template_str,
    multi_modal_llm=openai_mm_llm,
    verbose=True,
)
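# Hedged continuation, reusing the run pattern from the restaurant and Amazon
# sections above (the choice of Replicate model is an assumption):
response = openai_program_ins()
for res in response:
    print(res)
pydantic_replicate(
    "llava-13b", InsAds, ins_image_documents, prompt_template_str
)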
get_ipython().run_line_magic('pip', 'install llama-index-readers-chroma')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.readers.chroma import ChromaReader
reader = ChromaReader(
collection_name="chroma_collection",
persist_directory="examples/data_connectors/chroma_collection",
)
query_vector = [0.0] * 1536  # placeholder: supply a real query embedding (1536 dims assumes an OpenAI ada-002 collection)
documents = reader.load_data(
collection_name="demo", query_vector=query_vector, limit=5
)
from llama_index.core import SummaryIndex
index = SummaryIndex.from_documents(documents)
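# Hedged usage sketch; "<query_text>" is a placeholder to fill in:
query_engine = index.as_query_engine()
response = query_engine.query("<query_text>")
print(response)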
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import nest_asyncio
from tqdm.asyncio import tqdm_asyncio
nest_asyncio.apply()
def displayify_df(df):
"""For pretty displaying DataFrame in a notebook."""
display_df = df.style.set_properties(
**{
"inline-size": "300px",
"overflow-wrap": "break-word",
}
)
display(display_df)
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex
rag_dataset, documents = download_llama_dataset(
"EvaluatingLlmSurveyPaperDataset", "./data"
)
rag_dataset.to_pandas()[:5]
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
prediction_dataset = await rag_dataset.amake_predictions_with(
predictor=query_engine, batch_size=100, show_progress=True
)
from llama_index.llms.openai import OpenAI
from llama_index.core.evaluation import (
AnswerRelevancyEvaluator,
ContextRelevancyEvaluator,
)
judges = {}
judges["answer_relevancy"] = AnswerRelevancyEvaluator(
llm=OpenAI(temperature=0, model="gpt-3.5-turbo"),
)
judges["context_relevancy"] = ContextRelevancyEvaluator(
    llm=OpenAI(temperature=0, model="gpt-4"),
)
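# Hedged continuation: score each prediction with both judges. A sequential
# sketch; the async batch APIs used later in this document would be faster.
answer_relevancy_results = []
context_relevancy_results = []
for example, prediction in zip(
    rag_dataset.examples, prediction_dataset.predictions
):
    answer_relevancy_results.append(
        judges["answer_relevancy"].evaluate(
            query=example.query, response=prediction.response
        )
    )
    context_relevancy_results.append(
        judges["context_relevancy"].evaluate(
            query=example.query, contexts=prediction.contexts
        )
    )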
get_ipython().run_line_magic('pip', 'install llama-index-callbacks-wandb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import os
from getpass import getpass
if os.getenv("OPENAI_API_KEY") is None:
os.environ["OPENAI_API_KEY"] = getpass(
"Paste your OpenAI key from:"
" https://platform.openai.com/account/api-keys\n"
)
assert os.getenv("OPENAI_API_KEY", "").startswith(
"sk-"
), "This doesn't look like a valid OpenAI API key"
print("OpenAI API key configured")
from llama_index.core.callbacks import CallbackManager
from llama_index.core.callbacks import LlamaDebugHandler
from llama_index.callbacks.wandb import WandbCallbackHandler
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
SimpleKeywordTableIndex,
StorageContext,
)
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-4", temperature=0)
import llama_index.core
from llama_index.core import set_global_handler
set_global_handler("wandb", run_args={"project": "llamaindex"})
wandb_callback = llama_index.core.global_handler
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
run_args = dict(
project="llamaindex",
)
wandb_callback = WandbCallbackHandler(run_args=run_args)
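# Hedged continuation: register both handlers globally so traces stream to W&B.
Settings.callback_manager = CallbackManager([llama_debug, wandb_callback])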
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=')
get_ipython().run_line_magic('env', 'BRAINTRUST_API_KEY=')
get_ipython().run_line_magic('env', 'TOKENIZERS_PARALLELISM=true')  # avoids a tokenizers warning message from Chroma
get_ipython().run_line_magic('pip', 'install -U llama_hub llama_index braintrust autoevals pypdf pillow transformers torch torchvision')
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-small-en")
llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
base_retriever = base_index.as_retriever(similarity_top_k=2)
retrievals = base_retriever.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for n in retrievals:
display_source_node(n, source_length=1500)
query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm)
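# Hedged usage sketch for the assembled engine:
response = query_engine_base.query(
    "Can you tell me about the key concepts for safety finetuning"
)
print(str(response))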
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-docarray')
get_ipython().system('pip install llama-index')
import os
import sys
import logging
import textwrap
import warnings
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from llama_index.core import (
GPTVectorStoreIndex,
SimpleDirectoryReader,
Document,
)
from llama_index.vector_stores.docarray import DocArrayInMemoryVectorStore
from IPython.display import Markdown, display
import os
os.environ["OPENAI_API_KEY"] = "<your openai key>"
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(
    "Document ID:",
    documents[0].doc_id,
    "Document Hash:",
    documents[0].hash,
)
from llama_index.core import StorageContext
vector_store = DocArrayInMemoryVectorStore()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = GPTVectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What was a hard moment for the author?")
print(textwrap.fill(str(response), 100))
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
},
),
]
from llama_index.core import StorageContext
vector_store = DocArrayInMemoryVectorStore()
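# Hedged continuation: index the movie nodes in the fresh in-memory store.
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = GPTVectorStoreIndex(nodes, storage_context=storage_context)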
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
from llama_index.readers.file import ImageTabularChartReader
from llama_index.core import SummaryIndex
from llama_index.core.response.notebook_utils import display_response
from pathlib import Path
loader = ImageTabularChartReader(keep_image=True)
documents = loader.load_data(file=Path("./marine_chart.png"))
print(documents[0].text)
summary_index = SummaryIndex.from_documents(documents)
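# Hedged sketch: query the parsed chart and render the answer with the
# notebook helper imported above.
response = summary_index.as_query_engine().query(
    "What is the difference between the two graphs?"
)
display_response(response)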
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
index = VectorStoreIndex.from_documents(documents)
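# Hedged usage sketch:
query_engine = index.as_query_engine()
print(query_engine.query("What did the author do growing up?"))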
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-docarray')
get_ipython().system('pip install llama-index')
import os
import sys
import logging
import textwrap
import warnings
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from llama_index.core import (
GPTVectorStoreIndex,
SimpleDirectoryReader,
Document,
)
from llama_index.vector_stores.docarray import DocArrayHnswVectorStore
from IPython.display import Markdown, display
import os
os.environ["OPENAI_API_KEY"] = "<your openai key>"
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(
    "Document ID:",
    documents[0].doc_id,
    "Document Hash:",
    documents[0].hash,
)
from llama_index.core import StorageContext
vector_store = DocArrayHnswVectorStore(work_dir="hnsw_index")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = GPTVectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What was a hard moment for the author?")
print(textwrap.fill(str(response), 100))
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
},
),
]
from llama_index.core import StorageContext
vector_store = DocArrayHnswVectorStore(work_dir="hnsw_filters")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
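# Hedged continuation: index the nodes, then retrieve with a metadata filter.
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
index = GPTVectorStoreIndex(nodes, storage_context=storage_context)
filters = MetadataFilters(
    filters=[ExactMatchFilter(key="theme", value="Mafia")]
)
retriever = index.as_retriever(filters=filters)
print(retriever.retrieve("What is inception about?"))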
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
get_ipython().system('pip -q install python-dotenv pinecone-client llama-index pymupdf')
dotenv_path = (
    "env"  # Google Colab won't let you open a .env file, but a plain file named "env" works
)
with open(dotenv_path, "w") as f:
f.write('PINECONE_API_KEY="<your api key>"\n')
f.write('PINECONE_ENVIRONMENT="gcp-starter"\n')
f.write('OPENAI_API_KEY="<your api key>"\n')
import os
from dotenv import load_dotenv
load_dotenv(dotenv_path=dotenv_path)
import pinecone
api_key = os.environ["PINECONE_API_KEY"]
environment = os.environ["PINECONE_ENVIRONMENT"]
pinecone.init(api_key=api_key, environment=environment)
index_name = "llamaindex-rag-fs"
pinecone.create_index(
index_name, dimension=1536, metric="euclidean", pod_type="p1"
)
pinecone_index = pinecone.Index(index_name)
pinecone_index.delete(deleteAll=True)
from llama_index.vector_stores.pinecone import PineconeVectorStore
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
import fitz
file_path = "./data/llama2.pdf"
doc = fitz.open(file_path)
from llama_index.core.node_parser import SentenceSplitter
text_parser = SentenceSplitter(
chunk_size=1024,
)
text_chunks = []
doc_idxs = []
for doc_idx, page in enumerate(doc):
page_text = page.get_text("text")
cur_text_chunks = text_parser.split_text(page_text)
text_chunks.extend(cur_text_chunks)
doc_idxs.extend([doc_idx] * len(cur_text_chunks))
from llama_index.core.schema import TextNode
nodes = []
for idx, text_chunk in enumerate(text_chunks):
node = TextNode(
text=text_chunk,
)
src_doc_idx = doc_idxs[idx]
src_page = doc[src_doc_idx]
nodes.append(node)
print(nodes[0].metadata)
print(nodes[0].get_content(metadata_mode="all"))
from llama_index.core.extractors import (
QuestionsAnsweredExtractor,
TitleExtractor,
)
from llama_index.core.ingestion import IngestionPipeline
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo")
extractors = [
TitleExtractor(nodes=5, llm=llm),
    QuestionsAnsweredExtractor(questions=3, llm=llm),
]
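# Hedged continuation: run the extractors over the nodes with the ingestion
# pipeline imported above.
pipeline = IngestionPipeline(transformations=extractors)
nodes = pipeline.run(nodes=nodes, in_place=False, show_progress=True)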
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-together')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
domain = "docs.llamaindex.ai"
docs_url = "https://docs.llamaindex.ai/en/latest/"
get_ipython().system('wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains {domain} --no-parent {docs_url}')
from llama_index.readers.file import UnstructuredReader
from pathlib import Path
from llama_index.llms.openai import OpenAI
from llama_index.core import Document
reader = UnstructuredReader()
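# Hedged sketch: load the scraped HTML pages (paths assume the wget layout
# created above).
all_html_files = [str(p) for p in Path(domain).rglob("*.html")]
docs = []
for f in all_html_files:
    docs.extend(reader.load_data(file=Path(f), split_documents=True))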
get_ipython().run_line_magic('pip', 'install llama-index-llms-openrouter')
get_ipython().system('pip install llama-index')
from llama_index.llms.openrouter import OpenRouter
from llama_index.core.llms import ChatMessage
llm = OpenRouter(
api_key="<your-api-key>",
max_tokens=256,
context_window=4096,
model="gryphe/mythomax-l2-13b",
)
message = ChatMessage(role="user", content="Tell me a joke")
resp = llm.chat([message])
print(resp)
message = ChatMessage(role="user", content="Tell me a story in 250 words")
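# Hedged continuation: stream the longer completion token by token.
resp = llm.stream_chat([message])
for r in resp:
    print(r.delta, end="")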
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.2)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(vector_store_query_mode="mmr")
response = query_engine.query("What did the author do growing up?")
print(response)
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine_with_threshold = index.as_query_engine(
vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.2}
)
response = query_engine_with_threshold.query(
"What did the author do growing up?"
)
print(response)
index1 = VectorStoreIndex.from_documents(documents)
query_engine_no_mmr = index1.as_query_engine()
response_no_mmr = query_engine_no_mmr.query(
    "What did the author do growing up?"
)
index2 = VectorStoreIndex.from_documents(documents)
query_engine_with_high_threshold = index2.as_query_engine(
vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.8}
)
response_high_threshold = query_engine_with_high_threshold.query(
"What did the author do growing up?"
)
index3 = VectorStoreIndex.from_documents(documents)
query_engine_with_low_threshold = index3.as_query_engine(
vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.2}
)
response_low_threshold = query_engine_with_low_threshold.query(
"What did the author do growing up?"
)
print(
"Scores without MMR ",
[node.score for node in response_no_mmr.source_nodes],
)
print(
"Scores with MMR and a threshold of 0.8 ",
[node.score for node in response_high_threshold.source_nodes],
)
print(
"Scores with MMR and a threshold of 0.2 ",
[node.score for node in response_low_threshold.source_nodes],
)
documents = SimpleDirectoryReader("../data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(
documents,
)
retriever = index.as_retriever(
vector_store_query_mode="mmr",
similarity_top_k=3,
vector_store_kwargs={"mmr_threshold": 0.1},
)
nodes = retriever.retrieve(
"What did the author do during his time in Y Combinator?"
)
from llama_index.core.response.notebook_utils import display_source_node
for n in nodes:
display_source_node(n, source_length=1000)
retriever = index.as_retriever(
vector_store_query_mode="mmr",
similarity_top_k=3,
vector_store_kwargs={"mmr_threshold": 0.5},
)
nodes = retriever.retrieve(
"What did the author do during his time in Y Combinator?"
)
for n in nodes:
display_source_node(n, source_length=1000)
retriever = index.as_retriever(
vector_store_query_mode="mmr",
similarity_top_k=3,
vector_store_kwargs={"mmr_threshold": 0.8},
)
nodes = retriever.retrieve(
"What did the author do during his time in Y Combinator?"
)
for n in nodes:
    display_source_node(n, source_length=1000)
get_ipython().system(' pip install -q llama-index upstash-vector llama-index-vector-stores-upstash')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.upstash import UpstashVectorStore
from llama_index.core import StorageContext
import textwrap
import openai
openai.api_key = "sk-..."
get_ipython().system(" mkdir -p 'data/paul_graham/'")
get_ipython().system(" wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
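# Hedged continuation: connect an Upstash Vector index (URL/token values are
# placeholders) and build the index over the documents.
vector_store = UpstashVectorStore(
    url="<UPSTASH_VECTOR_REST_URL>", token="<UPSTASH_VECTOR_REST_TOKEN>"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)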
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere')
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('pip install "google-generativeai" -q')
from llama_index.core.llama_dataset import download_llama_dataset
evaluator_dataset, _ = download_llama_dataset(
"MiniMtBenchSingleGradingDataset", "./mini_mt_bench_data"
)
evaluator_dataset.to_pandas()[:5]
from llama_index.core.evaluation import CorrectnessEvaluator
from llama_index.llms.openai import OpenAI
from llama_index.llms.gemini import Gemini
from llama_index.llms.cohere import Cohere
llm_gpt4 = OpenAI(temperature=0, model="gpt-4")
llm_gpt35 = OpenAI(temperature=0, model="gpt-3.5-turbo")
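# Hedged continuation: one CorrectnessEvaluator per judge LLM (the Gemini and
# Cohere model names, and API keys read from the environment, are assumptions).
llm_gemini = Gemini(model="models/gemini-pro", temperature=0)
llm_cohere = Cohere(model="command", temperature=0)
evaluators = {
    "gpt-4": CorrectnessEvaluator(llm=llm_gpt4),
    "gpt-3.5": CorrectnessEvaluator(llm=llm_gpt35),
    "gemini-pro": CorrectnessEvaluator(llm=llm_gemini),
    "cohere": CorrectnessEvaluator(llm=llm_cohere),
}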
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
from llama_index.llms.openai import OpenAI
from llama_index.core.query_engine import CitationQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage,
)
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
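# Hedged sketch of the citation flow these imports set up (assumes the Paul
# Graham essay is under ./data/paul_graham as elsewhere in this document).
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = CitationQueryEngine.from_args(
    index, similarity_top_k=3, citation_chunk_size=512
)
response = query_engine.query("What did the author do growing up?")
print(response)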
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=YOUR_OPENAI_KEY')
get_ipython().system('pip install llama-index pypdf')
get_ipython().system("mkdir -p 'data/'")
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-small-en")
llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
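# Hedged continuation, mirroring the earlier recursive-retriever section:
base_retriever = base_index.as_retriever(similarity_top_k=2)
query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm)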
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix
from typing import Iterable
from random import randrange
LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = "llama-index-colab"
SESSION_CORPUS_ID_PREFIX = (
f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}"
)
def corpus_id(num_id: int) -> str:
return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}"
SESSION_CORPUS_ID = corpus_id(1)
def list_corpora() -> Iterable[genaix.Corpus]:
client = genaix.build_semantic_retriever()
yield from genaix.list_corpora(client=client)
def delete_corpus(*, corpus_id: str) -> None:
client = genaix.build_semantic_retriever()
genaix.delete_corpus(corpus_id=corpus_id, client=client)
def cleanup_colab_corpora():
for corpus in list_corpora():
if corpus.corpus_id.startswith(LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX):
try:
delete_corpus(corpus_id=corpus.corpus_id)
print(f"Deleted corpus {corpus.corpus_id}.")
except Exception:
pass
cleanup_colab_corpora()
from llama_index.core import SimpleDirectoryReader
from llama_index.indices.managed.google import GoogleIndex
from llama_index.core import Response
import time
index = GoogleIndex.create_corpus(
corpus_id=SESSION_CORPUS_ID, display_name="My first corpus!"
)
print(f"Newly created corpus ID is {index.corpus_id}.")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index.insert_documents(documents)
for corpus in list_corpora():
print(corpus)
query_engine = index.as_query_engine()
response = query_engine.query("What did Paul Graham do growing up?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
for cited_text in [node.text for node in response.source_nodes]:
print(f"Cited text: {cited_text}")
if response.metadata:
print(
f"Answerability: {response.metadata.get('answerable_probability', 0)}"
)
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
query_engine = index.as_query_engine()
response = query_engine.query("Which company did Paul Graham build?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
index.insert_nodes(
[
TextNode(
text="It was the best of times.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="123",
metadata={"file_name": "Tale of Two Cities"},
)
},
),
TextNode(
text="It was the worst of times.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="123",
metadata={"file_name": "Tale of Two Cities"},
)
},
),
TextNode(
text="Bugs Bunny: Wassup doc?",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="456",
metadata={"file_name": "Bugs Bunny Adventure"},
)
},
),
]
)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
HarmCategory,
SafetySetting,
)
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
query_engine = index.as_query_engine(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
SafetySetting(
category=HarmCategory.HARM_CATEGORY_VIOLENCE,
threshold=SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH,
),
],
)
response = query_engine.query("What was Bugs Bunny's favorite saying?")
print(response)
from llama_index.core import Response
response = query_engine.query("What were Paul Graham's achievements?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
for cited_text in [node.text for node in response.source_nodes]:
print(f"Cited text: {cited_text}")
if response.metadata:
print(
f"Answerability: {response.metadata.get('answerable_probability', 0)}"
)
from llama_index.llms.gemini import Gemini
GEMINI_API_KEY = "" # @param {type:"string"}
gemini = Gemini(api_key=GEMINI_API_KEY)
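# Hedged sketch: pair the managed retriever with Gemini for synthesis, then
# clean up the session corpora created above.
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine.from_args(
    index.as_retriever(), llm=gemini
)
print(query_engine.query("What did Paul Graham do growing up?"))
cleanup_colab_corpora()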
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
from llama_index.core import Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai import OpenAI
Settings.llm = OpenAI(model="gpt-3.5-turbo")
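# Hedged continuation: pair the OpenAI LLM with a local HuggingFace embedding
# model and build the index.
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
index = VectorStoreIndex.from_documents(documents)
print(index.as_query_engine().query("What did the author do growing up?"))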
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system("mkdir -p 'data/'")
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.core import Document, VectorStoreIndex
from llama_index.readers.file import PyMuPDFReader
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.llms.openai import OpenAI
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
node_parser = SimpleNodeParser.from_defaults()
nodes = node_parser.get_nodes_from_documents(docs)
len(nodes)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/fh9vsmmm8vu0j50l3ss38/llama2_eval_qr_dataset.json?rlkey=kkoaez7aqeb4z25gzc06ak6kb&dl=1" -O data/llama2_eval_qr_dataset.json')
from llama_index.core.evaluation import QueryResponseDataset
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-1106-preview")
dataset_generator = DatasetGenerator(
nodes[:20],
llm=llm,
show_progress=True,
num_questions_per_chunk=3,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60)
eval_dataset.save_json("data/llama2_eval_qr_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
from llama_index.core.evaluation.eval_utils import (
get_responses,
get_results_df,
)
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
BatchEvalRunner,
)
from llama_index.llms.openai import OpenAI
eval_llm = OpenAI(model="gpt-4-1106-preview")
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm)
evaluator_dict = {
"correctness": evaluator_c,
"semantic_similarity": evaluator_s,
}
batch_runner = BatchEvalRunner(evaluator_dict, workers=2, show_progress=True)
import numpy as np
import time
import os
import pickle
from tqdm import tqdm
def get_responses_sync(
eval_qs, query_engine, show_progress=True, save_path=None
):
if show_progress:
eval_qs_iter = tqdm(eval_qs)
else:
eval_qs_iter = eval_qs
pred_responses = []
start_time = time.time()
for eval_q in eval_qs_iter:
print(f"eval q: {eval_q}")
        pred_response = query_engine.query(eval_q)
print(f"predicted response: {pred_response}")
pred_responses.append(pred_response)
if save_path is not None:
avg_time = (time.time() - start_time) / len(pred_responses)
pickle.dump(
{"pred_responses": pred_responses, "avg_time": avg_time},
open(save_path, "wb"),
)
return pred_responses
async def run_evals(
query_engine,
eval_qa_pairs,
batch_runner,
disable_async_for_preds=False,
save_path=None,
):
eval_qs = [q for q, _ in eval_qa_pairs]
eval_answers = [a for _, a in eval_qa_pairs]
if save_path is not None:
if not os.path.exists(save_path):
start_time = time.time()
if disable_async_for_preds:
pred_responses = get_responses_sync(
eval_qs,
query_engine,
show_progress=True,
save_path=save_path,
)
else:
pred_responses = get_responses(
eval_qs, query_engine, show_progress=True
)
avg_time = (time.time() - start_time) / len(eval_qs)
pickle.dump(
{"pred_responses": pred_responses, "avg_time": avg_time},
open(save_path, "wb"),
)
else:
pickled_dict = pickle.load(open(save_path, "rb"))
pred_responses = pickled_dict["pred_responses"]
avg_time = pickled_dict["avg_time"]
else:
start_time = time.time()
pred_responses = get_responses(
eval_qs, query_engine, show_progress=True
)
avg_time = (time.time() - start_time) / len(eval_qs)
eval_results = await batch_runner.aevaluate_responses(
eval_qs, responses=pred_responses, reference=eval_answers
)
return eval_results, {"avg_time": avg_time}
from llama_index.agent.openai import OpenAIAssistantAgent
agent = OpenAIAssistantAgent.from_new(
name="SEC Analyst",
instructions="You are a QA assistant designed to analyze sec filings.",
openai_tools=[{"type": "retrieval"}],
instructions_prefix="Please address the user as Jerry.",
files=["data/llama2.pdf"],
verbose=True,
)
response = agent.query(
"What are the key differences between Llama 2 and Llama 2-Chat?"
)
print(str(response))
llm = OpenAI(model="gpt-4-1106-preview")
base_index = VectorStoreIndex(nodes)
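# Hedged continuation: evaluate a vanilla top-k query engine on the same
# dataset for comparison with the assistant agent (save path is an assumption).
base_query_engine = base_index.as_query_engine(similarity_top_k=2)
base_eval_results, base_extra_info = await run_evals(
    base_query_engine,
    eval_dataset.qr_pairs,
    batch_runner,
    save_path="data/llama2_preds_base.pkl",
)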
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.postprocessor import LLMRerank
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core import Settings
from llama_index.packs.koda_retriever import KodaRetriever
from llama_index.core.evaluation import RetrieverEvaluator
from llama_index.core import SimpleDirectoryReader
import os
from pinecone import Pinecone
from llama_index.core.node_parser import SemanticSplitterNodeParser
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.evaluation import generate_qa_embedding_pairs
import pandas as pd
pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY"))
index = pc.Index("llama2-paper") # this was previously created in my pinecone account
Settings.llm = OpenAI()
Settings.embed_model = OpenAIEmbedding()
vector_store = PineconeVectorStore(pinecone_index=index)
vector_index = VectorStoreIndex.from_vector_store(
vector_store=vector_store, embed_model=Settings.embed_model
)
reranker = LLMRerank(llm=Settings.llm) # optional
koda_retriever = KodaRetriever(
index=vector_index,
llm=Settings.llm,
reranker=reranker, # optional
verbose=True,
similarity_top_k=10,
)
vanilla_retriever = vector_index.as_retriever()
pipeline = IngestionPipeline(
transformations=[Settings.embed_model], vector_store=vector_store
)
def load_documents(file_path, num_pages=None):
if num_pages:
documents = SimpleDirectoryReader(input_files=[file_path]).load_data()[
:num_pages
]
else:
        documents = SimpleDirectoryReader(input_files=[file_path]).load_data()
    return documents
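# Hedged usage sketch: run the adaptive retriever and inspect the scored nodes.
results = koda_retriever.retrieve("How was Llama 2 pretrained?")
for node_with_score in results:
    print(node_with_score.score, node_with_score.node.get_content()[:100])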
get_ipython().system('pip install llama-index-postprocessor-jinaai-rerank')
get_ipython().system('pip install llama-index-embeddings-jinaai')
get_ipython().system('pip install llama-index')
import os
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
)
from llama_index.embeddings.jinaai import JinaEmbedding
api_key = os.environ["JINA_API_KEY"]
jina_embeddings = JinaEmbedding(api_key=api_key)
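# Hedged continuation: index with Jina embeddings, then rerank retrieved nodes
# with JinaRerank (the data path reuses the essay from other sections).
from llama_index.postprocessor.jinaai_rerank import JinaRerank
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
index = VectorStoreIndex.from_documents(documents, embed_model=jina_embeddings)
jina_rerank = JinaRerank(api_key=api_key, top_n=2)
query_engine = index.as_query_engine(
    similarity_top_k=10, node_postprocessors=[jina_rerank]
)
print(query_engine.query("What did the author do growing up?"))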
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai-legacy')
get_ipython().system('pip install llama-index')
import json
from typing import Sequence
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from llama_index.core.tools import QueryEngineTool, ToolMetadata
try:
storage_context = StorageContext.from_defaults(
persist_dir="./storage/march"
)
march_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/june"
)
june_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/sept"
)
sept_index = load_index_from_storage(storage_context)
index_loaded = True
except Exception:
    index_loaded = False
get_ipython().system("mkdir -p 'data/10q/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_march_2022.pdf' -O 'data/10q/uber_10q_march_2022.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_june_2022.pdf' -O 'data/10q/uber_10q_june_2022.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_sept_2022.pdf' -O 'data/10q/uber_10q_sept_2022.pdf'")
if not index_loaded:
march_docs = SimpleDirectoryReader(
input_files=["./data/10q/uber_10q_march_2022.pdf"]
).load_data()
june_docs = SimpleDirectoryReader(
input_files=["./data/10q/uber_10q_june_2022.pdf"]
).load_data()
sept_docs = SimpleDirectoryReader(
input_files=["./data/10q/uber_10q_sept_2022.pdf"]
).load_data()
march_index = VectorStoreIndex.from_documents(march_docs)
june_index = VectorStoreIndex.from_documents(june_docs)
sept_index = VectorStoreIndex.from_documents(sept_docs)
march_index.storage_context.persist(persist_dir="./storage/march")
june_index.storage_context.persist(persist_dir="./storage/june")
sept_index.storage_context.persist(persist_dir="./storage/sept")
march_engine = march_index.as_query_engine(similarity_top_k=3)
june_engine = june_index.as_query_engine(similarity_top_k=3)
sept_engine = sept_index.as_query_engine(similarity_top_k=3)
query_engine_tools = [
QueryEngineTool(
query_engine=march_engine,
metadata=ToolMetadata(
name="uber_march_10q",
description=(
"Provides information about Uber 10Q filings for March 2022. "
"Use a detailed plain text question as input to the tool."
),
),
),
QueryEngineTool(
query_engine=june_engine,
metadata=ToolMetadata(
name="uber_june_10q",
description=(
"Provides information about Uber financials for June 2021. "
"Use a detailed plain text question as input to the tool."
),
),
),
QueryEngineTool(
query_engine=sept_engine,
metadata=ToolMetadata(
name="uber_sept_10q",
description=(
"Provides information about Uber financials for Sept 2021. "
"Use a detailed plain text question as input to the tool."
),
),
),
]
from llama_index.core import Document
from llama_index.agent.openai_legacy import ContextRetrieverOpenAIAgent
texts = [
"Abbreviation: X = Revenue",
"Abbreviation: YZ = Risk Factors",
"Abbreviation: Z = Costs",
]
docs = [Document(text=t) for t in texts]
context_index = VectorStoreIndex.from_documents(docs)
context_agent = ContextRetrieverOpenAIAgent.from_tools_and_retriever(
query_engine_tools,
context_index.as_retriever(similarity_top_k=1),
verbose=True,
)
response = context_agent.chat("What is the YZ of March 2022?")
print(str(response))
context_agent.chat("What is the X and Z in September 2022?")
from llama_index.core.tools import BaseTool, FunctionTool
def magic_formula(revenue: int, cost: int) -> int:
"""Runs MAGIC_FORMULA on revenue and cost."""
return revenue - cost
magic_tool = FunctionTool.from_defaults(fn=magic_formula, name="magic_formula")
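# Hedged continuation: hand the agent the tool plus the abbreviation retriever
# so it can combine retrieved figures with MAGIC_FORMULA.
context_agent = ContextRetrieverOpenAIAgent.from_tools_and_retriever(
    [magic_tool],
    context_index.as_retriever(similarity_top_k=1),
    verbose=True,
)
response = context_agent.chat(
    "Can you run MAGIC_FORMULA on Uber's revenue and cost?"
)
print(response)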
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core import Settings
from llama_index.llms.openai import OpenAI
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.llm = llm
Settings.chunk_size = 512
index = VectorStoreIndex.from_documents(documents)
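# Hedged usage sketch, matching the display conventions above:
query_engine = index.as_query_engine(response_mode="tree_summarize")
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))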
import os
os.environ["OPENAI_API_KEY"] = "YOUR OPENAI API KEY"
from llama_index.llms.openai import OpenAI
llm = OpenAI("gpt-4")
from llama_index.core.llama_pack import download_llama_pack
SelfDiscoverPack = download_llama_pack("SelfDiscoverPack", "./self_discover_pack")
self_discover_pack = SelfDiscoverPack(verbose=True, llm=llm)
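# Hedged usage sketch; the task string and run() signature follow the common
# llama-pack interface and are assumptions:
task = "Plan a 3-day trip to San Francisco on a $500 budget."
output = self_discover_pack.run(task)
print(output)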
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader("../data/paul_graham")
docs = reader.load_data()
import os
from llama_index.core import (
StorageContext,
VectorStoreIndex,
load_index_from_storage,
)
if not os.path.exists("storage"):
index = VectorStoreIndex.from_documents(docs)
index.set_index_id("vector_index")
index.storage_context.persist("./storage")
else:
storage_context = StorageContext.from_defaults(persist_dir="storage")
index = load_index_from_storage(storage_context, index_id="vector_index")
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core import PromptTemplate
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
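# Hedged continuation: chain prompt -> LLM with QueryPipeline; traces land in
# the Phoenix app launched above.
p = QueryPipeline(chain=[prompt_tmpl, llm], verbose=True)
output = p.run(movie_name="The Departed")
print(str(output))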
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-lancedb')
get_ipython().system('pip install llama-index')
import logging
import sys
from llama_index.core import SimpleDirectoryReader, Document, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.lancedb import LanceDBVectorStore
import textwrap
import openai
openai.api_key = ""
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print("Document ID:", documents[0].doc_id, "Document Hash:", documents[0].hash)
vector_store = LanceDBVectorStore(uri="/tmp/lancedb")
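# Hedged continuation: wire the LanceDB store into an index and query it.
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
response = index.as_query_engine().query("What did the author do growing up?")
print(textwrap.fill(str(response), 100))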
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm')
get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git')
get_ipython().run_line_magic('pip', 'install torch torchvision')
get_ipython().run_line_magic('pip', 'install matplotlib scikit-image')
get_ipython().run_line_magic('pip', 'install -U qdrant_client')
import os
OPENAI_API_TOKEN = "sk-"
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
import wikipedia
import urllib.request
from pathlib import Path
image_path = Path("mixed_wiki")
image_uuid = 0
image_metadata_dict = {}
MAX_IMAGES_PER_WIKI = 30
wiki_titles = [
"Vincent van Gogh",
"San Francisco",
"Batman",
"iPhone",
"Tesla Model S",
"BTS band",
]
if not image_path.exists():
Path.mkdir(image_path)
for title in wiki_titles:
images_per_wiki = 0
print(title)
try:
page_py = wikipedia.page(title)
list_img_urls = page_py.images
for url in list_img_urls:
if url.endswith(".jpg") or url.endswith(".png"):
image_uuid += 1
image_file_name = title + "_" + url.split("/")[-1]
image_metadata_dict[image_uuid] = {
"filename": image_file_name,
"img_path": "./" + str(image_path / f"{image_uuid}.jpg"),
}
urllib.request.urlretrieve(
url, image_path / f"{image_uuid}.jpg"
)
images_per_wiki += 1
if images_per_wiki > MAX_IMAGES_PER_WIKI:
break
    except Exception:
        print(f"No images found for Wikipedia page: {title}")
continue
from PIL import Image
import matplotlib.pyplot as plt
import os
image_paths = []
for img_path in os.listdir("./mixed_wiki"):
image_paths.append(str(os.path.join("./mixed_wiki", img_path)))
def plot_images(image_paths):
images_shown = 0
plt.figure(figsize=(16, 9))
for img_path in image_paths:
if os.path.isfile(img_path):
image = Image.open(img_path)
plt.subplot(3, 3, images_shown + 1)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
images_shown += 1
if images_shown >= 9:
break
plot_images(image_paths)
from llama_index.core.indices import MultiModalVectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import SimpleDirectoryReader, StorageContext
import qdrant_client
from llama_index.core import SimpleDirectoryReader
client = qdrant_client.QdrantClient(path="qdrant_img_db")
text_store = QdrantVectorStore(
client=client, collection_name="text_collection"
)
image_store = QdrantVectorStore(
client=client, collection_name="image_collection"
)
storage_context = StorageContext.from_defaults(
vector_store=text_store, image_store=image_store
)
documents = SimpleDirectoryReader("./mixed_wiki/").load_data()
index = MultiModalVectorStoreIndex.from_documents(
documents,
storage_context=storage_context,
)
input_image = "./mixed_wiki/2.jpg"
plot_images([input_image])
retriever_engine = index.as_retriever(image_similarity_top_k=4)
retrieval_results = retriever_engine.image_to_image_retrieve(
"./mixed_wiki/2.jpg"
)
retrieved_images = []
for res in retrieval_results:
retrieved_images.append(res.node.metadata["file_path"])
plot_images(retrieved_images[1:])
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core import SimpleDirectoryReader
from llama_index.core.schema import ImageDocument
image_documents = [ImageDocument(image_path=input_image)]
for res_img in retrieved_images[1:]:
    image_documents.append(ImageDocument(image_path=res_img))
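# Hedged continuation: ask the multimodal LLM to compare the base image with
# the retrieved ones.
openai_mm_llm = OpenAIMultiModal(
    model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=1500
)
response = openai_mm_llm.complete(
    prompt="Given the first image as the base image, what do the other images have in common?",
    image_documents=image_documents,
)
print(response)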
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core.query_pipeline import (
QueryPipeline as QP,
Link,
InputComponent,
)
from llama_index.core.query_engine.pandas import PandasInstructionParser
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
get_ipython().system("wget 'https://raw.githubusercontent.com/jerryjliu/llama_index/main/docs/examples/data/csv/titanic_train.csv' -O 'titanic_train.csv'")
import pandas as pd
df = pd.read_csv("./titanic_train.csv")
instruction_str = (
"1. Convert the query to executable Python code using Pandas.\n"
"2. The final line of code should be a Python expression that can be called with the `eval()` function.\n"
"3. The code should represent a solution to the query.\n"
"4. PRINT ONLY THE EXPRESSION.\n"
"5. Do not quote the expression.\n"
)
pandas_prompt_str = (
"You are working with a pandas dataframe in Python.\n"
"The name of the dataframe is `df`.\n"
"This is the result of `print(df.head())`:\n"
"{df_str}\n\n"
"Follow these instructions:\n"
"{instruction_str}\n"
"Query: {query_str}\n\n"
"Expression:"
)
response_synthesis_prompt_str = (
"Given an input question, synthesize a response from the query results.\n"
"Query: {query_str}\n\n"
"Pandas Instructions (optional):\n{pandas_instructions}\n\n"
"Pandas Output: {pandas_output}\n\n"
"Response: "
)
pandas_prompt = PromptTemplate(pandas_prompt_str).partial_format(
instruction_str=instruction_str, df_str=df.head(5)
)
pandas_output_parser = PandasInstructionParser(df)
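# Hedged continuation: assemble and link the pipeline modules (structure
# follows the pandas query-pipeline pattern; module names are assumptions).
response_synthesis_prompt = PromptTemplate(response_synthesis_prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
qp = QP(
    modules={
        "input": InputComponent(),
        "pandas_prompt": pandas_prompt,
        "llm1": llm,
        "pandas_output_parser": pandas_output_parser,
        "response_synthesis_prompt": response_synthesis_prompt,
        "llm2": llm,
    },
    verbose=True,
)
qp.add_chain(["input", "pandas_prompt", "llm1", "pandas_output_parser"])
qp.add_links(
    [
        Link("input", "response_synthesis_prompt", dest_key="query_str"),
        Link("llm1", "response_synthesis_prompt", dest_key="pandas_instructions"),
        Link(
            "pandas_output_parser",
            "response_synthesis_prompt",
            dest_key="pandas_output",
        ),
    ]
)
qp.add_link("response_synthesis_prompt", "llm2")
response = qp.run(query_str="What is the correlation between survivors and age?")
print(response.message.content)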
get_ipython().system('pip install llama-index')
import logging
import sys
from IPython.display import Markdown, display
import pandas as pd
from llama_index.core.query_engine import PandasQueryEngine
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
df = pd.DataFrame(
{
"city": ["Toronto", "Tokyo", "Berlin"],
"population": [2930000, 13960000, 3645000],
}
)
query_engine = PandasQueryEngine(df=df, verbose=True)
response = query_engine.query(
"What is the city with the highest population?",
)
display(Markdown(f"<b>{response}</b>"))
print(response.metadata["pandas_instruction_str"])
query_engine = PandasQueryEngine(df=df, verbose=True, synthesize_response=True)
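# Hedged continuation: with synthesize_response=True the answer comes back as
# prose rather than a raw pandas value.
response = query_engine.query(
    "What is the city with the highest population? Give both the city and population"
)
print(str(response))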
get_ipython().run_line_magic('pip', 'install llama-index-readers-elasticsearch')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-opensearch')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-ollama')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from os import getenv
from llama_index.core import SimpleDirectoryReader
from llama_index.vector_stores.opensearch import (
OpensearchVectorStore,
OpensearchVectorClient,
)
from llama_index.core import VectorStoreIndex, StorageContext
endpoint = getenv("OPENSEARCH_ENDPOINT", "http://localhost:9200")
idx = getenv("OPENSEARCH_INDEX", "gpt-index-demo")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
text_field = "content"
embedding_field = "embedding"
client = OpensearchVectorClient(
endpoint, idx, 1536, embedding_field=embedding_field, text_field=text_field
)
vector_store = OpensearchVectorStore(client)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents=documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
res = query_engine.query("What did the author do growing up?")
res.response
from llama_index.core import Document
from llama_index.core.vector_stores import MetadataFilters, ExactMatchFilter
import regex as re
text_chunks = documents[0].text.split("\n\n")
footnotes = [
Document(
text=chunk,
        id_=documents[0].doc_id,
metadata={"is_footnote": bool(re.search(r"^\s*\[\d+\]\s*", chunk))},
)
for chunk in text_chunks
if bool(re.search(r"^\s*\[\d+\]\s*", chunk))
]
for f in footnotes:
index.insert(f)
footnote_query_engine = index.as_query_engine(
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="term", value='{"metadata.is_footnote": "true"}'
),
ExactMatchFilter(
key="query_string",
value='{"query": "content: space AND content: lisp"}',
),
]
)
)
res = footnote_query_engine.query(
"What did the author about space aliens and lisp?"
)
res.response
from llama_index.readers.elasticsearch import ElasticsearchReader
rdr = ElasticsearchReader(endpoint, idx)
docs = rdr.load_data(text_field, embedding_field=embedding_field)
print("embedding dimension:", len(docs[0].embedding))
print("all fields in index:", docs[0].metadata.keys())
print("total number of chunks created:", len(docs))
docs = rdr.load_data(text_field, {"query": {"match": {text_field: "Lisp"}}})
print("chunks that mention Lisp:", len(docs))
docs = rdr.load_data(text_field, {"query": {"match": {text_field: "Yahoo"}}})
print("chunks that mention Yahoo:", len(docs))
from os import getenv
from llama_index.vector_stores.opensearch import (
OpensearchVectorStore,
OpensearchVectorClient,
)
endpoint = getenv("OPENSEARCH_ENDPOINT", "http://localhost:9200")
idx = getenv("OPENSEARCH_INDEX", "auto_retriever_movies")
text_field = "content"
embedding_field = "embedding"
client = OpensearchVectorClient(
endpoint,
idx,
4096,
embedding_field=embedding_field,
text_field=text_field,
search_pipeline="hybrid-search-pipeline",
)
from llama_index.embeddings.ollama import OllamaEmbedding
embed_model = OllamaEmbedding(model_name="llama2")
vector_store = OpensearchVectorStore(client)
from llama_index.core.schema import TextNode
from llama_index.core import VectorStoreIndex, StorageContext
storage_context = | StorageContext.from_defaults(vector_store=vector_store) | llama_index.core.StorageContext.from_defaults |
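# Minimal sketch tying the hybrid-search pieces together (node texts and
# metadata are illustrative, and passing embed_model directly to the index
# constructor is an assumption):
nodes = [
    TextNode(
        text="The Godfather is a 1972 crime film directed by Francis Ford Coppola.",
        metadata={"theme": "Mafia"},
    ),
    TextNode(
        text="Inception is a 2010 science-fiction film directed by Christopher Nolan.",
        metadata={"theme": "Sci-fi"},
    ),
]
index = VectorStoreIndex(
    nodes, storage_context=storage_context, embed_model=embed_model
)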
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('mkdir -p data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
get_ipython().system('pip install llama_hub')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.readers.file import UnstructuredReader
from llama_index.readers.file import PyMuPDFReader
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = | SentenceSplitter(chunk_size=1024) | llama_index.core.node_parser.SentenceSplitter |
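# Hedged sketch of the step this setup points toward: split the document into
# base nodes and give them deterministic ids, so IndexNode references to parent
# chunks stay stable across runs (the id scheme is illustrative):
base_nodes = node_parser.get_nodes_from_documents(docs)
for i, node in enumerate(base_nodes):
    node.id_ = f"node-{i}"
print("parsed", len(base_nodes), "base nodes")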
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('mkdir -p data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4")
node_parser = SentenceSplitter(chunk_size=1024)
nodes = node_parser.get_nodes_from_documents(documents)
index = VectorStoreIndex(nodes)
query_engine = index.as_query_engine(llm=llm)
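# Illustrative usage of the query engine over the indexed paper (the question
# is an example, not from the original):
response = query_engine.query(
    "What safety techniques were used when fine-tuning Llama 2?"
)
print(str(response))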
from llama_index.core.schema import BaseNode
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core import ChatPromptTemplate, PromptTemplate
from typing import Tuple, List
import re
llm = OpenAI(model="gpt-4")
QA_PROMPT = PromptTemplate(
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query.\n"
"Query: {query_str}\n"
"Answer: "
)
def generate_answers_for_questions(
questions: List[str], context: str, llm: OpenAI
) -> List[str]:
"""Generate answers for questions given context."""
answers = []
for question in questions:
fmt_qa_prompt = QA_PROMPT.format(
context_str=context, query_str=question
)
response_obj = llm.complete(fmt_qa_prompt)
answers.append(str(response_obj))
return answers
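# Illustrative usage of the helper above (the context and question are made-up
# samples, not drawn from the paper):
sample_context = "Llama 2 is a family of pretrained and fine-tuned language models."
sample_answers = generate_answers_for_questions(
    ["What is Llama 2?"], sample_context, llm
)
print(sample_answers)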
QUESTION_GEN_USER_TMPL = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"generate the relevant questions. "
)
QUESTION_GEN_SYS_TMPL = """\
You are a Teacher/Professor. Your task is to set up \
{num_questions_per_chunk} questions for an upcoming \
quiz/examination. The questions should be diverse in nature \
across the document. Restrict the questions to the \
context information provided.\
"""
question_gen_template = ChatPromptTemplate(
message_templates=[
| ChatMessage(role=MessageRole.SYSTEM, content=QUESTION_GEN_SYS_TMPL) | llama_index.core.llms.ChatMessage |
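# Hedged sketch completing the chat template with the user message and
# generating questions for a single chunk (num_questions_per_chunk=3 and the
# placeholder context string are illustrative):
full_question_gen_template = ChatPromptTemplate(
    message_templates=[
        ChatMessage(role=MessageRole.SYSTEM, content=QUESTION_GEN_SYS_TMPL),
        ChatMessage(role=MessageRole.USER, content=QUESTION_GEN_USER_TMPL),
    ]
)
fmt_prompt = full_question_gen_template.format(
    num_questions_per_chunk=3, context_str="<chunk text here>"
)
questions = [q for q in str(llm.complete(fmt_prompt)).split("\n") if q.strip()]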
get_ipython().run_line_magic('pip', 'install llama-index-readers-github')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-program-openai')
import os
os.environ["GITHUB_TOKEN"] = "<your github token>"
import os
from llama_index.readers.github import (
GitHubRepositoryIssuesReader,
GitHubIssuesClient,
)
github_client = GitHubIssuesClient()
loader = GitHubRepositoryIssuesReader(
github_client,
owner="jerryjliu",
repo="llama_index",
verbose=True,
)
docs = loader.load_data()
docs[10].text
docs[10].metadata
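# Quick sanity check on the loaded issues (the metadata keys are assumptions
# about what GitHubRepositoryIssuesReader attaches; .get() degrades gracefully):
print(f"loaded {len(docs)} issues")
print(docs[10].metadata.get("state"), docs[10].metadata.get("created_at"))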
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
from pydantic import BaseModel
from typing import List
import asyncio
from llama_index.program.openai import OpenAIPydanticProgram
from llama_index.llms.openai import OpenAI
from llama_index.core.async_utils import batch_gather
prompt_template_str = """\
Here is a Github Issue ticket.
{ticket}
Please extract central themes and output a list of tags.\
"""
class TagList(BaseModel):
"""A list of tags corresponding to central themes of an issue."""
tags: List[str]
program = OpenAIPydanticProgram.from_defaults(
prompt_template_str=prompt_template_str,
output_cls=TagList,
)
tasks = [program.acall(ticket=doc) for doc in docs]
tag_lists = await batch_gather(tasks, batch_size=10, verbose=True)
import pickle
with open("github_issue_analysis_data.pkl", "wb") as f:
pickle.dump(tag_lists, f)
with open("github_issue_analysis_data.pkl", "rb") as f:
tag_lists = pickle.load(f)
print(f"Loaded tag lists for {len(tag_lists)} tickets")
prompt = """
Here is a list of central themes (in the form of tags) extracted from a list of Github Issue tickets.
Tags for each ticket are separated by two newlines.
{tag_lists_str}
Please summarize the key takeaways and what we should prioritize to fix.
"""
tag_lists_str = "\n\n".join([str(tag_list) for tag_list in tag_lists])
prompt = prompt.format(tag_lists_str=tag_lists_str)
from llama_index.llms.openai import OpenAI
response = | OpenAI(model="gpt-4") | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-llms-bedrock')
get_ipython().system('pip install llama-index')
from llama_index.llms.bedrock import Bedrock
profile_name = "Your aws profile name"
resp = Bedrock(
model="amazon.titan-text-express-v1", profile_name=profile_name
).complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.bedrock import Bedrock
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
| ChatMessage(role="user", content="Tell me a story") | llama_index.core.llms.ChatMessage |
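# Hedged sketch finishing the chat example; the message list is rebuilt
# self-contained here since the row above elides its closing bracket.
chat_messages = [
    ChatMessage(
        role="system", content="You are a pirate with a colorful personality"
    ),
    ChatMessage(role="user", content="Tell me a story"),
]
resp = Bedrock(
    model="amazon.titan-text-express-v1", profile_name=profile_name
).chat(chat_messages)
print(resp)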