get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "./llama2.pdf"')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/1706.03762.pdf" -O "./attention.pdf"')
from llama_index.core import download_loader
from llama_index.readers.file import PyMuPDFReader
llama2_docs = PyMuPDFReader().load_data(
file_path="./llama2.pdf", metadata=True
)
attention_docs = PyMuPDFReader().load_data(
file_path="./attention.pdf", metadata=True
)
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.core.node_parser import TokenTextSplitter
nodes = TokenTextSplitter(
chunk_size=1024, chunk_overlap=128
).get_nodes_from_documents(llama2_docs + attention_docs)
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.storage.docstore.firestore import FirestoreDocumentStore
from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore
docstore = SimpleDocumentStore()
docstore.add_documents(nodes)
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
client = QdrantClient(path="./qdrant_data")
vector_store = QdrantVectorStore("composable", client=client)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes=nodes, storage_context=storage_context)
vector_retriever = index.as_retriever(similarity_top_k=2)
bm25_retriever = BM25Retriever.from_defaults(
docstore=docstore, similarity_top_k=2
)
from llama_index.core.schema import IndexNode
vector_obj = IndexNode(
index_id="vector", obj=vector_retriever, text="Vector Retriever"
)
bm25_obj = IndexNode(
index_id="bm25", obj=bm25_retriever, text="BM25 Retriever"
)
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(objects=[vector_obj, bm25_obj])
query_engine = summary_index.as_query_engine(
response_mode="tree_summarize", verbose=True
)
response = await query_engine.aquery(
"How does attention work in transformers?"
)
print(str(response))
response = await query_engine.aquery(
"What is the architecture of Llama2 based on?"
)
print(str(response))
response = await query_engine.aquery(
"What was used before attention in transformers?"
)
print(str(response))
docstore.persist("./docstore.json")
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
docstore = SimpleDocumentStore.from_persist_path("./docstore.json")
client = QdrantClient(path="./qdrant_data")
vector_store = QdrantVectorStore("composable", client=client)
index = VectorStoreIndex.from_vector_store(vector_store)
vector_retriever = index.as_retriever(similarity_top_k=2)
bm25_retriever = BM25Retriever.from_defaults(
docstore=docstore, similarity_top_k=2
)
from llama_index.core.schema import IndexNode
vector_obj = IndexNode(
index_id="vector", obj=vector_retriever, text="Vector Retriever"
)
bm25_obj = IndexNode(
index_id="bm25", obj=bm25_retriever, text="BM25 Retriever"
)
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(objects=[vector_obj, bm25_obj])
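# Hedged sketch: with the retrievers re-wired as objects, the query engine can
# be rebuilt exactly as before (mirrors the earlier tree_summarize setup).
query_engine = summary_index.as_query_engine(
    response_mode="tree_summarize", verbose=True
)
response = await query_engine.aquery("What is the architecture of Llama2 based on?")
print(str(response))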
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
from llama_index import download_loader
from base import DocugamiReader
docset_id = "ecxqpipcoe2p"
document_ids = ["43rj0ds7s0ur", "bpc1vibyeke2"]
loader = DocugamiReader()
documents = loader.load_data(docset_id=docset_id, document_ids=document_ids)
from llama_index import VectorStoreIndex
docset_id = "wh2kned25uqm"
documents = loader.load_data(docset_id=docset_id)
for d in documents:
stripped_metadata = d.metadata.copy()
for key in d.metadata:
if key not in ["name", "xpath", "id", "structure"]:
del stripped_metadata[key]
d.metadata = stripped_metadata
documents
index = VectorStoreIndex.from_documents(documents)
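# Hedged usage sketch: query the indexed Docugami chunks (the question below is
# illustrative for this docset).
query_engine = index.as_query_engine()
response = query_engine.query(
    "What can tenants do with signage on their properties?"
)
print(response)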
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-cohere')
get_ipython().system('pip install llama-index cohere pypdf')
openai_api_key = "YOUR OPENAI API KEY"
cohere_api_key = "YOUR COHEREAI API KEY"
import os
os.environ["OPENAI_API_KEY"] = openai_api_key
os.environ["COHERE_API_KEY"] = cohere_api_key
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.cohere import CohereEmbedding
from llama_index.core.retrievers import BaseRetriever, VectorIndexRetriever
from llama_index.core import QueryBundle
from llama_index.core.indices.query.schema import QueryType
from llama_index.core.schema import NodeWithScore
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.evaluation import EmbeddingQAFinetuneDataset
from llama_index.finetuning import generate_cohere_reranker_finetuning_dataset
from llama_index.core.evaluation import generate_question_context_pairs
from llama_index.core.evaluation import RetrieverEvaluator
from llama_index.finetuning import CohereRerankerFinetuneEngine
from typing import List
import pandas as pd
import nest_asyncio
nest_asyncio.apply()
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'")
lyft_docs = SimpleDirectoryReader(
input_files=["./data/10k/lyft_2021.pdf"]
).load_data()
uber_docs = SimpleDirectoryReader(
input_files=["./data/10k/uber_2021.pdf"]
).load_data()
node_parser = SimpleNodeParser.from_defaults(chunk_size=400)
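# A minimal usage sketch: split each filing into ~400-token nodes.
lyft_nodes = node_parser.get_nodes_from_documents(lyft_docs)
uber_nodes = node_parser.get_nodes_from_documents(uber_docs)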
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader("../data/paul_graham")
docs = reader.load_data()
import os
from llama_index.core import (
StorageContext,
VectorStoreIndex,
load_index_from_storage,
)
if not os.path.exists("storage"):
index = VectorStoreIndex.from_documents(docs)
index.set_index_id("vector_index")
index.storage_context.persist("./storage")
else:
storage_context = StorageContext.from_defaults(persist_dir="storage")
index = load_index_from_storage(storage_context, index_id="vector_index")
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core import PromptTemplate
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
p = QueryPipeline(chain=[prompt_tmpl, llm], verbose=True)
output = p.run(movie_name="The Departed")
print(str(output))
from typing import List
from pydantic import BaseModel, Field
from llama_index.core.output_parsers import PydanticOutputParser
class Movie(BaseModel):
"""Object representing a single movie."""
name: str = Field(..., description="Name of the movie.")
year: int = Field(..., description="Year of the movie.")
class Movies(BaseModel):
"""Object representing a list of movies."""
movies: List[Movie] = Field(..., description="List of movies.")
llm = OpenAI(model="gpt-3.5-turbo")
output_parser = PydanticOutputParser(Movies)
json_prompt_str = """\
Please generate related movies to {movie_name}. Output with the following JSON format:
"""
json_prompt_str = output_parser.format(json_prompt_str)
json_prompt_tmpl = PromptTemplate(json_prompt_str)
p = QueryPipeline(chain=[json_prompt_tmpl, llm, output_parser], verbose=True)
output = p.run(movie_name="Toy Story")
output
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
prompt_str2 = """\
Here's some text:
{text}
Can you rewrite this with a summary of each movie?
"""
prompt_tmpl2 = PromptTemplate(prompt_str2)
llm = OpenAI(model="gpt-3.5-turbo")
llm_c = llm.as_query_component(streaming=True)
p = QueryPipeline(
chain=[prompt_tmpl, llm_c, prompt_tmpl2, llm_c], verbose=True
)
output = p.run(movie_name="The Dark Knight")
for o in output:
print(o.delta, end="")
p = QueryPipeline(
chain=[
json_prompt_tmpl,
llm.as_query_component(streaming=True),
output_parser,
],
verbose=True,
)
output = p.run(movie_name="Toy Story")
print(output)
from llama_index.postprocessor.cohere_rerank import CohereRerank
prompt_str1 = "Please generate a concise question about Paul Graham's life regarding the following topic {topic}"
prompt_tmpl1 = PromptTemplate(prompt_str1)
prompt_str2 = (
"Please write a passage to answer the question\n"
"Try to include as many key details as possible.\n"
"\n"
"\n"
"{query_str}\n"
"\n"
"\n"
'Passage:"""\n'
)
prompt_tmpl2 = PromptTemplate(prompt_str2)
llm = OpenAI(model="gpt-3.5-turbo")
retriever = index.as_retriever(similarity_top_k=5)
p = QueryPipeline(
chain=[prompt_tmpl1, llm, prompt_tmpl2, llm, retriever], verbose=True
)
nodes = p.run(topic="college")
len(nodes)
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.response_synthesizers import TreeSummarize
prompt_str = "Please generate a question about Paul Graham's life regarding the following topic {topic}"
prompt_tmpl = PromptTemplate(prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
retriever = index.as_retriever(similarity_top_k=3)
reranker = CohereRerank()
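# Hedged sketch: chain question generation, retrieval, reranking, and tree
# summarization into one pipeline (the summarizer instance is an assumption).
summarizer = TreeSummarize(llm=llm)
p = QueryPipeline(
    chain=[prompt_tmpl, llm, retriever, reranker, summarizer], verbose=True
)
response = p.run(topic="YC")
print(str(response))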
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import os
import getpass
import openai
openai.api_key = "sk-"
import chromadb
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection("quickstart")
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore
from IPython.display import Markdown, display
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"author": "Harper Lee",
"theme": "Mafia",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
from llama_index.core import StorageContext
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.vector_stores import (
MetadataFilter,
MetadataFilters,
FilterOperator,
)
filters = MetadataFilters(
filters=[
MetadataFilter(key="theme", operator=FilterOperator.EQ, value="Mafia"),
]
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("What is inception about?")
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
filters = MetadataFilters(
filters=[
MetadataFilter(key="theme", value="Mafia"),
MetadataFilter(key="year", value=1972),
]
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("What is inception about?")
from llama_index.core.vector_stores import FilterOperator, FilterCondition
filters = MetadataFilters(
filters=[
MetadataFilter(key="theme", value="Fiction"),
        MetadataFilter(key="year", value=1997, operator=FilterOperator.GT),
    ],
    condition=FilterCondition.OR,
)
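# Usage mirrors the earlier filter examples: apply the combined filters at
# retrieval time.
retriever = index.as_retriever(filters=filters)
retriever.retrieve("Harry Potter?")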
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.core import SummaryIndex
from llama_index.core.response.notebook_utils import display_response
from llama_index.llms.openai import OpenAI
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
llm = OpenAI(model="gpt-4")
chunk_sizes = [128, 256, 512, 1024]
nodes_list = []
vector_indices = []
for chunk_size in chunk_sizes:
print(f"Chunk Size: {chunk_size}")
splitter = SentenceSplitter(chunk_size=chunk_size)
nodes = splitter.get_nodes_from_documents(docs)
for node in nodes:
node.metadata["chunk_size"] = chunk_size
node.excluded_embed_metadata_keys = ["chunk_size"]
node.excluded_llm_metadata_keys = ["chunk_size"]
nodes_list.append(nodes)
vector_index = VectorStoreIndex(nodes)
vector_indices.append(vector_index)
from llama_index.core.tools import RetrieverTool
from llama_index.core.schema import IndexNode
retriever_dict = {}
retriever_nodes = []
for chunk_size, vector_index in zip(chunk_sizes, vector_indices):
node_id = f"chunk_{chunk_size}"
node = IndexNode(
text=(
"Retrieves relevant context from the Llama 2 paper (chunk size"
f" {chunk_size})"
),
index_id=node_id,
)
retriever_nodes.append(node)
retriever_dict[node_id] = vector_index.as_retriever()
from llama_index.core.selectors import PydanticMultiSelector
from llama_index.core.retrievers import RouterRetriever
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(retriever_nodes)
retriever = RecursiveRetriever(
root_id="root",
retriever_dict={"root": summary_index.as_retriever(), **retriever_dict},
)
nodes = await retriever.aretrieve(
"Tell me about the main aspects of safety fine-tuning"
)
print(f"Number of nodes: {len(nodes)}")
for node in nodes:
print(node.node.metadata["chunk_size"])
print(node.node.get_text())
from llama_index.core.postprocessor import LLMRerank, SentenceTransformerRerank
from llama_index.postprocessor.cohere_rerank import CohereRerank
reranker = CohereRerank(top_n=10)
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine(retriever, node_postprocessors=[reranker])
response = query_engine.query(
"Tell me about the main aspects of safety fine-tuning"
)
display_response(
response, show_source=True, source_length=500, show_source_metadata=True
)
from collections import defaultdict
import pandas as pd
def mrr_all(metadata_values, metadata_key, source_nodes):
value_to_mrr_dict = {}
for metadata_value in metadata_values:
mrr = 0
for idx, source_node in enumerate(source_nodes):
if source_node.node.metadata[metadata_key] == metadata_value:
mrr = 1 / (idx + 1)
break
value_to_mrr_dict[metadata_value] = mrr
df = pd.DataFrame(value_to_mrr_dict, index=["MRR"])
df.style.set_caption("Mean Reciprocal Rank")
return df
print("Mean Reciprocal Rank for each Chunk Size")
mrr_all(chunk_sizes, "chunk_size", response.source_nodes)
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
import nest_asyncio
nest_asyncio.apply()
eval_llm = OpenAI(model="gpt-4")
dataset_generator = DatasetGenerator(
nodes_list[-1],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=2,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60)
eval_dataset.save_json("data/llama2_eval_qr_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
import asyncio
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
RelevancyEvaluator,
FaithfulnessEvaluator,
PairwiseComparisonEvaluator,
)
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
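# Hedged sketch: the sibling evaluators imported above can be constructed the
# same way for a fuller comparison.
evaluator_s = SemanticSimilarityEvaluator()
evaluator_r = RelevancyEvaluator(llm=eval_llm)
evaluator_f = FaithfulnessEvaluator(llm=eval_llm)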
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader(
input_files=["./data/paul_graham/paul_graham_essay.txt"]
)
docs = reader.load_data()
text = docs[0].text
from llama_index.core import PromptTemplate
qa_prompt_tmpl = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query.\n"
"Please also write the answer in the style of {tone_name}.\n"
"Query: {query_str}\n"
"Answer: "
)
qa_prompt = PromptTemplate(qa_prompt_tmpl)
refine_prompt_tmpl = (
"The original query is as follows: {query_str}\n"
"We have provided an existing answer: {existing_answer}\n"
"We have the opportunity to refine the existing answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"Given the new context, refine the original answer to better "
"answer the query. "
"Please also write the answer in the style of {tone_name}.\n"
"If the context isn't useful, return the original answer.\n"
"Refined Answer: "
)
refine_prompt = PromptTemplate(refine_prompt_tmpl)
from llama_index.core.response_synthesizers import TreeSummarize, Refine
from llama_index.core.types import BaseModel
from typing import List
summarizer = TreeSummarize(verbose=True, summary_template=qa_prompt)
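# Hedged sketch: TreeSummarize forwards extra template variables such as
# tone_name at query time (the question and tone below are illustrative).
response = summarizer.get_response(
    "who is Paul Graham?", [text], tone_name="a Shakespeare play"
)
print(str(response))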
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install llama-index-packs-trulens-eval-packs')
get_ipython().system('pip install trulens-eval llama-hub html2text llama-index')
import os
from llama_index.packs.trulens_eval_packs import (
TruLensRAGTriadPack,
TruLensHarmlessPack,
TruLensHelpfulPack,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.readers.web import SimpleWebPageReader
from tqdm.auto import tqdm
os.environ["OPENAI_API_KEY"] = "sk-..."
documents = SimpleWebPageReader(html_to_text=True).load_data(
    ["http://paulgraham.com/worked.html"]  # assumed example URL
)
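# Hedged sketch: chunk the page and hand the nodes to the TruLens RAG triad
# pack (the app_id is an illustrative assumption).
parser = SentenceSplitter()
nodes = parser.get_nodes_from_documents(documents)
trulens_ragtriad_pack = TruLensRAGTriadPack(
    nodes=nodes, app_id="Query Engine v1: RAG triad evals"
)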
get_ipython().run_line_magic('pip', 'install llama-index-readers-github')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index llama-hub')
import nest_asyncio
nest_asyncio.apply()
import os
os.environ["GITHUB_TOKEN"] = "ghp_..."
os.environ["OPENAI_API_KEY"] = "sk-..."
import os
from llama_index.readers.github import (
GitHubRepositoryIssuesReader,
GitHubIssuesClient,
)
github_client = GitHubIssuesClient()
loader = GitHubRepositoryIssuesReader(
github_client,
owner="run-llama",
repo="llama_index",
verbose=True,
)
orig_docs = loader.load_data()
limit = 100
docs = []
for idx, doc in enumerate(orig_docs):
doc.metadata["index_id"] = int(doc.id_)
if idx >= limit:
break
docs.append(doc)
import weaviate
auth_config = weaviate.AuthApiKey(
api_key="XRa15cDIkYRT7AkrpqT6jLfE4wropK1c1TGk"
)
client = weaviate.Client(
"https://llama-index-test-v0oggsoz.weaviate.network",
auth_client_secret=auth_config,
)
class_name = "LlamaIndex_docs"
client.schema.delete_class(class_name)
from llama_index.vector_stores.weaviate import WeaviateVectorStore
from llama_index.core import VectorStoreIndex, StorageContext
vector_store = WeaviateVectorStore(
weaviate_client=client, index_name=class_name
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
doc_index = VectorStoreIndex.from_documents(
docs, storage_context=storage_context
)
from llama_index.core import SummaryIndex
from llama_index.core.async_utils import run_jobs
from llama_index.llms.openai import OpenAI
from llama_index.core.schema import IndexNode
from llama_index.core.vector_stores import (
FilterOperator,
MetadataFilter,
MetadataFilters,
)
async def aprocess_doc(doc, include_summary: bool = True):
"""Process doc."""
metadata = doc.metadata
date_tokens = metadata["created_at"].split("T")[0].split("-")
year = int(date_tokens[0])
month = int(date_tokens[1])
day = int(date_tokens[2])
assignee = (
"" if "assignee" not in doc.metadata else doc.metadata["assignee"]
)
size = ""
if len(doc.metadata["labels"]) > 0:
size_arr = [l for l in doc.metadata["labels"] if "size:" in l]
size = size_arr[0].split(":")[1] if len(size_arr) > 0 else ""
new_metadata = {
"state": metadata["state"],
"year": year,
"month": month,
"day": day,
"assignee": assignee,
"size": size,
}
summary_index = SummaryIndex.from_documents([doc])
query_str = "Give a one-sentence concise summary of this issue."
query_engine = summary_index.as_query_engine(
llm=OpenAI(model="gpt-3.5-turbo")
)
summary_txt = await query_engine.aquery(query_str)
summary_txt = str(summary_txt)
index_id = doc.metadata["index_id"]
filters = MetadataFilters(
filters=[
MetadataFilter(
key="index_id", operator=FilterOperator.EQ, value=int(index_id)
),
]
)
index_node = IndexNode(
text=summary_txt,
metadata=new_metadata,
obj=doc_index.as_retriever(filters=filters),
index_id=doc.id_,
)
return index_node
async def aprocess_docs(docs):
"""Process metadata on docs."""
index_nodes = []
tasks = []
for doc in docs:
task = aprocess_doc(doc)
tasks.append(task)
index_nodes = await run_jobs(tasks, show_progress=True, workers=3)
return index_nodes
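# Usage sketch: process all issues concurrently.
index_nodes = await aprocess_docs(docs)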
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-txtai')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import txtai
txtai_index = txtai.ann.ANNFactory.create({"backend": "numpy"})
from llama_index.core import (
SimpleDirectoryReader,
load_index_from_storage,
VectorStoreIndex,
StorageContext,
)
from llama_index.vector_stores.txtai import TxtaiVectorStore
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = TxtaiVectorStore(txtai_index=txtai_index)
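# Hedged sketch: wrap the txtai-backed store in a storage context and build the
# index over the essay.
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)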
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import BaseTool, FunctionTool
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
llm = OpenAI(model="gpt-3.5-turbo-1106")
agent = OpenAIAgent.from_tools(
[multiply_tool, add_tool], llm=llm, verbose=True
)
response = agent.chat("What is (121 * 3) + 42?")
print(str(response))
response = agent.stream_chat("What is (121 * 3) + 42?")
import nest_asyncio
nest_asyncio.apply()
response = await agent.achat("What is (121 * 3) + 42?")
print(str(response))
response = await agent.astream_chat("What is (121 * 3) + 42?")
response_gen = response.response_gen
async for token in response.async_response_gen():
print(token, end="")
import json
def get_current_weather(location, unit="fahrenheit"):
"""Get the current weather in a given location"""
if "tokyo" in location.lower():
return json.dumps(
{"location": location, "temperature": "10", "unit": "celsius"}
)
elif "san francisco" in location.lower():
return json.dumps(
{"location": location, "temperature": "72", "unit": "fahrenheit"}
)
else:
return json.dumps(
{"location": location, "temperature": "22", "unit": "celsius"}
)
weather_tool = FunctionTool.from_defaults(fn=get_current_weather)
llm = OpenAI(model="gpt-3.5-turbo-1106")
agent = OpenAIAgent.from_tools([weather_tool], llm=llm, verbose=True)
response = agent.chat(
"What's the weather like in San Francisco, Tokyo, and Paris?"
)
llm = OpenAI(model="gpt-3.5-turbo-0613")
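# Hedged sketch: older function-calling models plug into the agent the same way.
agent = OpenAIAgent.from_tools(
    [multiply_tool, add_tool], llm=llm, verbose=True
)
response = agent.chat("What is (121 * 3) + 42?")
print(str(response))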
import openai
openai.api_key = "sk-you-key"
from llama_index.agent import OpenAIAgent
from llama_index.llms import OpenAI
from llama_index.tools.zapier.base import ZapierToolSpec
zapier_spec = ZapierToolSpec(api_key="sk-ak-your-key")
tools = zapier_spec.to_tool_list()
llm = OpenAI(model="gpt-4-0613")
agent = OpenAIAgent.from_tools(tools, verbose=True, llm=llm)
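# Hedged usage sketch (the prompt is illustrative).
print(agent.chat("What Zapier actions are available to me?"))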
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-kvstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
from llama_index.storage.kvstore.firestore import FirestoreKVStore
from llama_index.storage.docstore.firestore import FirestoreDocumentStore
from llama_index.storage.index_store.firestore import FirestoreIndexStore
kvstore = FirestoreKVStore()
storage_context = StorageContext.from_defaults(
    docstore=FirestoreDocumentStore(kvstore),
    index_store=FirestoreIndexStore(kvstore),
)
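# Hedged sketch: build indices over the nodes on top of the Firestore-backed
# stores.
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)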
get_ipython().system('pip install llama-index')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
from llama_index.core import Settings
nodes = Settings.node_parser.get_nodes_from_documents(documents)
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
from llama_index.core import SimpleKeywordTableIndex, VectorStoreIndex
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context)
from llama_index.core import QueryBundle
from llama_index.core.schema import NodeWithScore
from llama_index.core.retrievers import (
BaseRetriever,
VectorIndexRetriever,
KeywordTableSimpleRetriever,
)
from typing import List
class CustomRetriever(BaseRetriever):
"""Custom retriever that performs both semantic search and hybrid search."""
def __init__(
self,
vector_retriever: VectorIndexRetriever,
keyword_retriever: KeywordTableSimpleRetriever,
mode: str = "AND",
) -> None:
"""Init params."""
self._vector_retriever = vector_retriever
self._keyword_retriever = keyword_retriever
if mode not in ("AND", "OR"):
raise ValueError("Invalid mode.")
self._mode = mode
super().__init__()
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve nodes given query."""
vector_nodes = self._vector_retriever.retrieve(query_bundle)
keyword_nodes = self._keyword_retriever.retrieve(query_bundle)
vector_ids = {n.node.node_id for n in vector_nodes}
keyword_ids = {n.node.node_id for n in keyword_nodes}
combined_dict = {n.node.node_id: n for n in vector_nodes}
combined_dict.update({n.node.node_id: n for n in keyword_nodes})
if self._mode == "AND":
retrieve_ids = vector_ids.intersection(keyword_ids)
else:
retrieve_ids = vector_ids.union(keyword_ids)
retrieve_nodes = [combined_dict[rid] for rid in retrieve_ids]
return retrieve_nodes
from llama_index.core import get_response_synthesizer
from llama_index.core.query_engine import RetrieverQueryEngine
vector_retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=2)
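# Hedged sketch: pair the vector retriever with a keyword retriever and compose
# them through the CustomRetriever defined above.
keyword_retriever = KeywordTableSimpleRetriever(index=keyword_index)
custom_retriever = CustomRetriever(vector_retriever, keyword_retriever)
response_synthesizer = get_response_synthesizer()
custom_query_engine = RetrieverQueryEngine(
    retriever=custom_retriever,
    response_synthesizer=response_synthesizer,
)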
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
from pathlib import Path
import nest_asyncio
nest_asyncio.apply()
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
pipeline = IngestionPipeline(
documents=docs,
transformations=[
HTMLNodeParser.from_defaults(),
SentenceSplitter(chunk_size=1024, chunk_overlap=200),
OpenAIEmbedding(),
],
)
eval_nodes = pipeline.run(documents=docs)
eval_llm = OpenAI(model="gpt-3.5-turbo")
dataset_generator = DatasetGenerator(
eval_nodes[:100],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=3,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100)
len(eval_dataset.qr_pairs)
eval_dataset.save_json("data/tesla10k_eval_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/tesla10k_eval_dataset.json"
)
eval_qs = eval_dataset.questions
qr_pairs = eval_dataset.qr_pairs
ref_response_strs = [r for (_, r) in qr_pairs]
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
)
from llama_index.core.evaluation.eval_utils import (
get_responses,
get_results_df,
)
from llama_index.core.evaluation import BatchEvalRunner
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm)
evaluator_dict = {
"correctness": evaluator_c,
"semantic_similarity": evaluator_s,
}
batch_eval_runner = BatchEvalRunner(
evaluator_dict, workers=2, show_progress=True
)
from llama_index.core import VectorStoreIndex
async def run_evals(
pipeline, batch_eval_runner, docs, eval_qs, eval_responses_ref
):
nodes = pipeline.run(documents=docs)
vector_index = VectorStoreIndex(nodes)
query_engine = vector_index.as_query_engine()
pred_responses = get_responses(eval_qs, query_engine, show_progress=True)
eval_results = await batch_eval_runner.aevaluate_responses(
eval_qs, responses=pred_responses, reference=eval_responses_ref
)
return eval_results
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
sent_parser_o0 = SentenceSplitter(chunk_size=1024, chunk_overlap=0)
sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
sent_parser_o500 = SentenceSplitter(chunk_size=1024, chunk_overlap=500)
html_parser = HTMLNodeParser.from_defaults()
parser_dict = {
"sent_parser_o0": sent_parser_o0,
"sent_parser_o200": sent_parser_o200,
"sent_parser_o500": sent_parser_o500,
}
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.ingestion import IngestionPipeline
pipeline_dict = {}
for k, parser in parser_dict.items():
pipeline = IngestionPipeline(
documents=docs,
transformations=[
html_parser,
parser,
OpenAIEmbedding(),
],
)
pipeline_dict[k] = pipeline
eval_results_dict = {}
for k, pipeline in pipeline_dict.items():
eval_results = await run_evals(
pipeline, batch_eval_runner, docs, eval_qs, ref_response_strs
)
eval_results_dict[k] = eval_results
import pickle
pickle.dump(eval_results_dict, open("eval_results_1.pkl", "wb"))
eval_results_list = list(eval_results_dict.items())
results_df = get_results_df(
[v for _, v in eval_results_list],
[k for k, _ in eval_results_list],
["correctness", "semantic_similarity"],
)
display(results_df)
for k, pipeline in pipeline_dict.items():
pipeline.cache.persist(f"./cache/{k}.json")
from llama_index.core.extractors import (
TitleExtractor,
QuestionsAnsweredExtractor,
SummaryExtractor,
)
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
extractor_dict = {
"summary": SummaryExtractor(in_place=False),
"qa": QuestionsAnsweredExtractor(in_place=False),
"default": None,
}
html_parser = HTMLNodeParser.from_defaults()
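# Hedged sketch: one ingestion pipeline per extractor so their effect on the
# eval metrics can be compared (reuses the o200 splitter settings from above).
sent_parser = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
extractor_pipeline_dict = {}
for k, extractor in extractor_dict.items():
    transformations = [html_parser, sent_parser]
    if extractor is not None:
        transformations.append(extractor)
    transformations.append(OpenAIEmbedding())
    extractor_pipeline_dict[k] = IngestionPipeline(
        transformations=transformations
    )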
get_ipython().system('pip install llama-index-multi-modal-llms-ollama')
get_ipython().system('pip install llama-index-readers-file')
get_ipython().system('pip install unstructured')
get_ipython().system('pip install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index-vector-stores-qdrant')
get_ipython().system('pip install llama-index-embeddings-clip')
from llama_index.multi_modal_llms.ollama import OllamaMultiModal
mm_model = OllamaMultiModal(model="llava:13b")
from pathlib import Path
from llama_index.core import SimpleDirectoryReader
from PIL import Image
import matplotlib.pyplot as plt
input_image_path = Path("restaurant_images")
if not input_image_path.exists():
Path.mkdir(input_image_path)
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png')
image_documents = SimpleDirectoryReader("./restaurant_images").load_data()
imageUrl = "./restaurant_images/fried_chicken.png"
image = Image.open(imageUrl).convert("RGB")
plt.figure(figsize=(16, 5))
plt.imshow(image)
from pydantic import BaseModel
class Restaurant(BaseModel):
"""Data model for an restaurant."""
restaurant: str
food: str
discount: str
price: str
rating: str
review: str
from llama_index.core.program import MultiModalLLMCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
prompt_template_str = """\
{query_str}
Return the answer as a Pydantic object. The Pydantic schema is given below:
"""
mm_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Restaurant),
image_documents=image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=mm_model,
verbose=True,
)
response = mm_program(query_str="Can you summarize what is in the image?")
for res in response:
print(res)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1THe1qqM61lretr9N3BmINc_NWDvuthYf" -O shanghai.jpg')
from pathlib import Path
from llama_index.readers.file import UnstructuredReader
from llama_index.core.schema import ImageDocument
loader = UnstructuredReader()
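# Hedged sketch: load the 10-K with Unstructured and wrap the downloaded image.
documents = loader.load_data(Path("./tesla_2021_10k.htm"))
image_doc = ImageDocument(image_path="./shanghai.jpg")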
import openai
from llama_index.agent import OpenAIAgent
openai.api_key = "sk-your-key"
from llama_index.tools.multion.base import MultionToolSpec
multion_tool = MultionToolSpec()
from llama_index.tools.gmail.base import GmailToolSpec
from llama_index.tools.ondemand_loader_tool import OnDemandLoaderTool
gmail_tool = GmailToolSpec()
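# Hedged sketch: combine both tool specs into a single agent.
agent = OpenAIAgent.from_tools(
    [*multion_tool.to_tool_list(), *gmail_tool.to_tool_list()], verbose=True
)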
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-colbert')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install torch sentence-transformers')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.indices.managed.google import GoogleIndex
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
project_name = "TODO-your-project-name" # @param {type:"string"}
email = "[email protected]" # @param {type:"string"}
client_file_name = "client_secret.json"
get_ipython().system('gcloud config set project $project_name')
get_ipython().system('gcloud config set account $email')
get_ipython().system('gcloud auth application-default login --no-browser --client-id-file=$client_file_name --scopes="https://www.googleapis.com/auth/generative-language.retriever,https://www.googleapis.com/auth/cloud-platform"')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
GOOGLE_API_KEY = "" # add your GOOGLE API key here
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from llama_index.core import SimpleDirectoryReader
from llama_index.indices.managed.google import GoogleIndex
google_index = GoogleIndex.create_corpus(display_name="My first corpus!")
print(f"Newly created corpus ID is {google_index.corpus_id}.")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
google_index.insert_documents(documents)
google_index = GoogleIndex.from_corpus(corpus_id="")
query_engine = google_index.as_query_engine()
response = query_engine.query("which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
)
query_engine = google_index.as_query_engine(
temperature=0.3,
answer_style=GenerateAnswerRequest.AnswerStyle.VERBOSE,
)
response = query_engine.query("Which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
)
query_engine = google_index.as_query_engine(
temperature=0.3,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
response = query_engine.query("Which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
)
query_engine = google_index.as_query_engine(
temperature=0.3,
answer_style=GenerateAnswerRequest.AnswerStyle.EXTRACTIVE,
)
response = query_engine.query("Which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from llama_index.response_synthesizers.google import GoogleTextSynthesizer
from llama_index.vector_stores.google import GoogleVectorStore
from llama_index.core import VectorStoreIndex
from llama_index.llms.gemini import Gemini
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.embeddings.gemini import GeminiEmbedding
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.7, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
)
reranker = LLMRerank(
    top_n=5,
    llm=Gemini(api_key=GOOGLE_API_KEY),
)
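# Hedged sketch: assemble retriever, Gemini reranker, and the Google answer
# synthesizer into one engine; reusing the corpus created earlier and the
# embedding model name are assumptions here.
vector_index = VectorStoreIndex.from_vector_store(
    GoogleVectorStore.from_corpus(corpus_id=google_index.corpus_id),
    embed_model=GeminiEmbedding(model_name="models/embedding-001"),
)
query_engine = RetrieverQueryEngine.from_args(
    retriever=VectorIndexRetriever(index=vector_index, similarity_top_k=20),
    node_postprocessors=[reranker],
    response_synthesizer=response_synthesizer,
)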
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
SimpleKeywordTableIndex,
)
from llama_index.core import SummaryIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
llm = OpenAI(model="gpt-4")
splitter = SentenceSplitter(chunk_size=1024)
nodes = splitter.get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
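# Hedged sketch: sibling indices over the same docstore-backed nodes.
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
    nodes, storage_context=storage_context
)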
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core.query_pipeline import (
QueryPipeline as QP,
Link,
InputComponent,
)
from llama_index.core.query_engine.pandas import PandasInstructionParser
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
get_ipython().system("wget 'https://raw.githubusercontent.com/jerryjliu/llama_index/main/docs/examples/data/csv/titanic_train.csv' -O 'titanic_train.csv'")
import pandas as pd
df = pd.read_csv("./titanic_train.csv")
instruction_str = (
"1. Convert the query to executable Python code using Pandas.\n"
"2. The final line of code should be a Python expression that can be called with the `eval()` function.\n"
"3. The code should represent a solution to the query.\n"
"4. PRINT ONLY THE EXPRESSION.\n"
"5. Do not quote the expression.\n"
)
pandas_prompt_str = (
"You are working with a pandas dataframe in Python.\n"
"The name of the dataframe is `df`.\n"
"This is the result of `print(df.head())`:\n"
"{df_str}\n\n"
"Follow these instructions:\n"
"{instruction_str}\n"
"Query: {query_str}\n\n"
"Expression:"
)
response_synthesis_prompt_str = (
"Given an input question, synthesize a response from the query results.\n"
"Query: {query_str}\n\n"
"Pandas Instructions (optional):\n{pandas_instructions}\n\n"
"Pandas Output: {pandas_output}\n\n"
"Response: "
)
pandas_prompt = PromptTemplate(pandas_prompt_str).partial_format(
instruction_str=instruction_str, df_str=df.head(5)
)
pandas_output_parser = PandasInstructionParser(df)
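# Hedged sketch: a linear pipeline from prompt to parsed pandas output; the
# final response-synthesis step from the prompt above is left out for brevity.
llm = OpenAI(model="gpt-3.5-turbo")
qp = QP(chain=[pandas_prompt, llm, pandas_output_parser], verbose=True)
output = qp.run(query_str="What is the correlation between survival and age?")
print(str(output))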
from llama_index.agent import OpenAIAgent
import openai
openai.api_key = "sk-your-key"
from llama_index.tools.yelp.base import YelpToolSpec
from llama_index.tools.tool_spec.load_and_search.base import LoadAndSearchToolSpec
tool_spec = YelpToolSpec(api_key="your-key", client_id="your-id")
tools = tool_spec.to_tool_list()
agent = OpenAIAgent.from_tools(
    [
        *LoadAndSearchToolSpec.from_defaults(tools[0]).to_tool_list(),
        *LoadAndSearchToolSpec.from_defaults(tools[1]).to_tool_list(),
    ],
    verbose=True,
)
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
import requests
import yaml
f = requests.get(
"https://raw.githubusercontent.com/sisbell/chatgpt-plugin-store/main/manifests/today-currency-converter.oiconma.repl.co.json"
).text
manifest = yaml.safe_load(f)
from llama_index.tools.chatgpt_plugin.base import ChatGPTPluginToolSpec
from llama_index.tools.requests.base import RequestsToolSpec
requests_spec = RequestsToolSpec()
plugin_spec = ChatGPTPluginToolSpec(manifest)
plugin_spec = ChatGPTPluginToolSpec(
    manifest_url="https://raw.githubusercontent.com/sisbell/chatgpt-plugin-store/main/manifests/today-currency-converter.oiconma.repl.co.json"
)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai pandas[jinja2] spacy')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
TreeIndex,
VectorStoreIndex,
SimpleDirectoryReader,
Response,
)
from llama_index.llms.openai import OpenAI
from llama_index.core.evaluation import RelevancyEvaluator
from llama_index.core.node_parser import SentenceSplitter
import pandas as pd
pd.set_option("display.max_colwidth", 0)
gpt3 = OpenAI(temperature=0, model="gpt-3.5-turbo")
gpt4 = OpenAI(temperature=0, model="gpt-4")
evaluator = RelevancyEvaluator(llm=gpt3)
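# Hedged sketch of the evaluation loop: build an index, query it, and ask the
# evaluator whether the response is relevant to the query (the data path and
# question are illustrative assumptions).
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
splitter = SentenceSplitter(chunk_size=512)
vector_index = VectorStoreIndex.from_documents(
    documents, transformations=[splitter]
)
query_engine = vector_index.as_query_engine()
query_str = "What did the author do growing up?"
response = query_engine.query(query_str)
eval_result = evaluator.evaluate_response(query=query_str, response=response)
print(str(eval_result.feedback))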
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.node_parser import SentenceWindowNodeParser
from llama_index.core.node_parser import SentenceSplitter
node_parser = SentenceWindowNodeParser.from_defaults(
window_size=3,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
text_splitter = SentenceSplitter()
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
embed_model = HuggingFaceEmbedding(
model_name="sentence-transformers/all-mpnet-base-v2", max_length=512
)
from llama_index.core import Settings
Settings.llm = llm
Settings.embed_model = embed_model
Settings.text_splitter = text_splitter
get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf')
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader(
input_files=["./IPCC_AR6_WGII_Chapter03.pdf"]
).load_data()
nodes = node_parser.get_nodes_from_documents(documents)
base_nodes = text_splitter.get_nodes_from_documents(documents)
from llama_index.core import VectorStoreIndex
sentence_index = VectorStoreIndex(nodes)
base_index = VectorStoreIndex(base_nodes)
from llama_index.core.postprocessor import MetadataReplacementPostProcessor
query_engine = sentence_index.as_query_engine(
    similarity_top_k=2,
    node_postprocessors=[
        MetadataReplacementPostProcessor(target_metadata_key="window")
    ],
)
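# Hedged usage sketch: the postprocessor swaps each retrieved sentence for its
# surrounding window before synthesis.
window_response = query_engine.query(
    "What are the concerns surrounding the AMOC?"
)
print(window_response)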
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core.schema import TextNode
from llama_index.core.indices.managed.types import ManagedIndexQueryMode
from llama_index.indices.managed.vectara import VectaraIndex
from llama_index.indices.managed.vectara import VectaraAutoRetriever
from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
from llama_index.llms.openai import OpenAI
nodes = [
TextNode(
text=(
"A pragmatic paleontologist touring an almost complete theme park on an island "
+ "in Central America is tasked with protecting a couple of kids after a power "
+ "failure causes the park's cloned dinosaurs to run loose."
),
metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"},
),
TextNode(
text=(
"A thief who steals corporate secrets through the use of dream-sharing technology "
+ "is given the inverse task of planting an idea into the mind of a C.E.O., "
+ "but his tragic past may doom the project and his team to disaster."
),
metadata={
"year": 2010,
"director": "Christopher Nolan",
"rating": 8.2,
},
),
TextNode(
text="Barbie suffers a crisis that leads her to question her world and her existence.",
metadata={
"year": 2023,
"director": "Greta Gerwig",
"genre": "fantasy",
"rating": 9.5,
},
),
TextNode(
text=(
"A cowboy doll is profoundly threatened and jealous when a new spaceman action "
+ "figure supplants him as top toy in a boy's bedroom."
),
metadata={"year": 1995, "genre": "animated", "rating": 8.3},
),
TextNode(
text=(
"When Woody is stolen by a toy collector, Buzz and his friends set out on a "
+ "rescue mission to save Woody before he becomes a museum toy property with his "
+ "roundup gang Jessie, Prospector, and Bullseye. "
),
metadata={"year": 1999, "genre": "animated", "rating": 7.9},
),
TextNode(
text=(
"The toys are mistakenly delivered to a day-care center instead of the attic "
+ "right before Andy leaves for college, and it's up to Woody to convince the "
+ "other toys that they weren't abandoned and to return home."
),
metadata={"year": 2010, "genre": "animated", "rating": 8.3},
),
]
index = | VectaraIndex(nodes=nodes) | llama_index.indices.managed.vectara.VectaraIndex |
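# Hedged sketch: the imports above point at auto-retrieval, where an LLM
# infers metadata filters from the natural-language query. The schema
# descriptions and the constructor shape are assumptions modeled on the core
# auto-retriever, not guaranteed by this row.
vector_store_info = VectorStoreInfo(
    content_info="brief summary of a movie",
    metadata_info=[
        MetadataInfo(name="genre", type="string", description="movie genre"),
        MetadataInfo(name="year", type="integer", description="release year"),
        MetadataInfo(name="rating", type="float", description="viewer rating, 1-10"),
    ],
)
retriever = VectaraAutoRetriever(
    index, vector_store_info=vector_store_info, llm=OpenAI(model="gpt-4")
)
# retriever.retrieve("movie about toys after 1990")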
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm')
get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git')
get_ipython().run_line_magic('pip', 'install torch torchvision')
get_ipython().run_line_magic('pip', 'install matplotlib scikit-image')
get_ipython().run_line_magic('pip', 'install -U qdrant_client')
import os
OPENAI_API_TOKEN = "sk-"
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
import wikipedia
import urllib.request
from pathlib import Path
image_path = Path("mixed_wiki")
image_uuid = 0
image_metadata_dict = {}
MAX_IMAGES_PER_WIKI = 30
wiki_titles = [
"Vincent van Gogh",
"San Francisco",
"Batman",
"iPhone",
"Tesla Model S",
"BTS band",
]
if not image_path.exists():
Path.mkdir(image_path)
for title in wiki_titles:
images_per_wiki = 0
print(title)
try:
page_py = wikipedia.page(title)
list_img_urls = page_py.images
for url in list_img_urls:
if url.endswith(".jpg") or url.endswith(".png"):
image_uuid += 1
image_file_name = title + "_" + url.split("/")[-1]
image_metadata_dict[image_uuid] = {
"filename": image_file_name,
"img_path": "./" + str(image_path / f"{image_uuid}.jpg"),
}
urllib.request.urlretrieve(
url, image_path / f"{image_uuid}.jpg"
)
images_per_wiki += 1
                if images_per_wiki >= MAX_IMAGES_PER_WIKI:
break
    except Exception:
        print(f"No images found for Wikipedia page: {title}")
        continue
from PIL import Image
import matplotlib.pyplot as plt
import os
image_paths = []
for img_path in os.listdir("./mixed_wiki"):
image_paths.append(str(os.path.join("./mixed_wiki", img_path)))
def plot_images(image_paths):
images_shown = 0
plt.figure(figsize=(16, 9))
for img_path in image_paths:
if os.path.isfile(img_path):
image = Image.open(img_path)
plt.subplot(3, 3, images_shown + 1)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
images_shown += 1
if images_shown >= 9:
break
plot_images(image_paths)
from llama_index.core.indices import MultiModalVectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import SimpleDirectoryReader, StorageContext
import qdrant_client
from llama_index.core import SimpleDirectoryReader
client = qdrant_client.QdrantClient(path="qdrant_img_db")
text_store = QdrantVectorStore(
client=client, collection_name="text_collection"
)
image_store = QdrantVectorStore(
client=client, collection_name="image_collection"
)
storage_context = StorageContext.from_defaults(
vector_store=text_store, image_store=image_store
)
documents = SimpleDirectoryReader("./mixed_wiki/").load_data()
index = MultiModalVectorStoreIndex.from_documents(
documents,
storage_context=storage_context,
)
input_image = "./mixed_wiki/2.jpg"
plot_images([input_image])
retriever_engine = index.as_retriever(image_similarity_top_k=4)
retrieval_results = retriever_engine.image_to_image_retrieve(
"./mixed_wiki/2.jpg"
)
retrieved_images = []
for res in retrieval_results:
retrieved_images.append(res.node.metadata["file_path"])
plot_images(retrieved_images[1:])
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core import SimpleDirectoryReader
from llama_index.core.schema import ImageDocument
image_documents = [ImageDocument(image_path=input_image)]
for res_img in retrieved_images[1:]:
image_documents.append( | ImageDocument(image_path=res_img) | llama_index.core.schema.ImageDocument |
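# Hedged sketch: with the query image and its retrieved neighbors collected
# into image_documents, GPT-4V can compare them; the model kwargs mirror the
# other multi-modal rows in this file, and the prompt is illustrative.
openai_mm_llm = OpenAIMultiModal(
    model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=1500
)
mm_response = openai_mm_llm.complete(
    prompt="Given the first image as the base image, what do the other images have in common with it?",
    image_documents=image_documents,
)
print(mm_response)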
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.postprocessor import LLMRerank
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core import Settings
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.packs.koda_retriever import KodaRetriever
import os
from pinecone import Pinecone
pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY"))
index = pc.Index("sample-movies")
Settings.llm = OpenAI()
Settings.embed_model = OpenAIEmbedding()
vector_store = | PineconeVectorStore(pinecone_index=index, text_key="summary") | llama_index.vector_stores.pinecone.PineconeVectorStore |
get_ipython().run_line_magic('pip', 'install llama-index-llms-llama-cpp')
get_ipython().system('pip install llama-index lm-format-enforcer llama-cpp-python')
import lmformatenforcer
import re
from llama_index.core.prompts.lmformatenforcer_utils import (
activate_lm_format_enforcer,
build_lm_format_enforcer_function,
)
regex = r'"Hello, my name is (?P<name>[a-zA-Z]*)\. I was born in (?P<hometown>[a-zA-Z]*)\. Nice to meet you!"'
from llama_index.llms.llama_cpp import LlamaCPP
llm = LlamaCPP()
regex_parser = lmformatenforcer.RegexParser(regex)
lm_format_enforcer_fn = build_lm_format_enforcer_function(llm, regex_parser)
with | activate_lm_format_enforcer(llm, lm_format_enforcer_fn) | llama_index.core.prompts.lmformatenforcer_utils.activate_lm_format_enforcer |
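# Hedged sketch: a complete version of the truncated with-block above. Inside
# the context, token sampling is constrained so the completion matches the
# regex; the prompt text is illustrative.
with activate_lm_format_enforcer(llm, lm_format_enforcer_fn):
    output = llm.complete(
        "Here is a way to present myself, if my name was John and I was born in Boston: "
    )
print(output)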
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-program-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core import PromptTemplate
choices = [
"Useful for questions related to apples",
"Useful for questions related to oranges",
]
def get_choice_str(choices):
choices_str = "\n\n".join(
[f"{idx+1}. {c}" for idx, c in enumerate(choices)]
)
return choices_str
choices_str = get_choice_str(choices)
router_prompt0 = PromptTemplate(
"Some choices are given below. It is provided in a numbered list (1 to"
" {num_choices}), where each item in the list corresponds to a"
" summary.\n---------------------\n{context_list}\n---------------------\nUsing"
" only the choices above and not prior knowledge, return the top choices"
" (no more than {max_outputs}, but only select what is needed) that are"
" most relevant to the question: '{query_str}'\n"
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo")
def get_formatted_prompt(query_str):
fmt_prompt = router_prompt0.format(
num_choices=len(choices),
max_outputs=2,
context_list=choices_str,
query_str=query_str,
)
return fmt_prompt
query_str = "Can you tell me more about the amount of Vitamin C in apples"
fmt_prompt = get_formatted_prompt(query_str)
response = llm.complete(fmt_prompt)
print(str(response))
query_str = "What are the health benefits of eating orange peels?"
fmt_prompt = get_formatted_prompt(query_str)
response = llm.complete(fmt_prompt)
print(str(response))
query_str = (
"Can you tell me more about the amount of Vitamin C in apples and oranges."
)
fmt_prompt = get_formatted_prompt(query_str)
response = llm.complete(fmt_prompt)
print(str(response))
from dataclasses import fields
from pydantic import BaseModel
import json
class Answer(BaseModel):
choice: int
reason: str
print(json.dumps(Answer.schema(), indent=2))
from llama_index.core.types import BaseOutputParser
FORMAT_STR = """The output should be formatted as a JSON instance that conforms to
the JSON schema below.
Here is the output schema:
{
"type": "array",
"items": {
"type": "object",
"properties": {
"choice": {
"type": "integer"
},
"reason": {
"type": "string"
}
},
"required": [
"choice",
"reason"
],
"additionalProperties": false
}
}
"""
def _escape_curly_braces(input_string: str) -> str:
escaped_string = input_string.replace("{", "{{").replace("}", "}}")
return escaped_string
def _marshal_output_to_json(output: str) -> str:
output = output.strip()
left = output.find("[")
right = output.find("]")
output = output[left : right + 1]
return output
from typing import List
class RouterOutputParser(BaseOutputParser):
def parse(self, output: str) -> List[Answer]:
"""Parse string."""
json_output = _marshal_output_to_json(output)
json_dicts = json.loads(json_output)
        answers = [Answer(**json_dict) for json_dict in json_dicts]
return answers
def format(self, prompt_template: str) -> str:
return prompt_template + "\n\n" + _escape_curly_braces(FORMAT_STR)
output_parser = RouterOutputParser()
from typing import List
def route_query(
query_str: str, choices: List[str], output_parser: RouterOutputParser
):
    choices_str = get_choice_str(choices)
fmt_base_prompt = router_prompt0.format(
num_choices=len(choices),
max_outputs=len(choices),
context_list=choices_str,
query_str=query_str,
)
fmt_json_prompt = output_parser.format(fmt_base_prompt)
raw_output = llm.complete(fmt_json_prompt)
parsed = output_parser.parse(str(raw_output))
return parsed
from pydantic import Field
class Answer(BaseModel):
"Represents a single choice with a reason."
choice: int
reason: str
class Answers(BaseModel):
"""Represents a list of answers."""
answers: List[Answer]
Answers.schema()
from llama_index.program.openai import OpenAIPydanticProgram
router_prompt1 = router_prompt0.partial_format(
num_choices=len(choices),
max_outputs=len(choices),
)
program = OpenAIPydanticProgram.from_defaults(
output_cls=Answers,
prompt=router_prompt1,
verbose=True,
)
query_str = "What are the health benefits of eating orange peels?"
output = program(context_list=choices_str, query_str=query_str)
output
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
from llama_index.core import VectorStoreIndex
from llama_index.core import SummaryIndex
from llama_index.core.node_parser import SentenceSplitter
splitter = | SentenceSplitter(chunk_size=1024) | llama_index.core.node_parser.SentenceSplitter |
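# Hedged sketch: with the splitter defined, the usual next step in this family
# of rows is to chunk the paper and build both index types over the nodes.
nodes = splitter.get_nodes_from_documents(documents)
vector_index = VectorStoreIndex(nodes)
summary_index = SummaryIndex(nodes)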
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import nltk
nltk.download("stopwords")
import llama_index.core
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
)
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
index.set_index_id("vector_index")
index.storage_context.persist("./storage")
storage_context = StorageContext.from_defaults(persist_dir="storage")
index = load_index_from_storage(storage_context, index_id="vector_index")
query_engine = index.as_query_engine(response_mode="tree_summarize")
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
query_modes = [
"svm",
"linear_regression",
"logistic_regression",
]
for query_mode in query_modes:
query_engine = index.as_query_engine(vector_store_query_mode=query_mode)
response = query_engine.query("What did the author do growing up?")
print(f"Query mode: {query_mode}")
display(Markdown(f"<b>{response}</b>"))
display(Markdown(f"<b>{response}</b>"))
print(response.source_nodes[0].text)
from llama_index.core import QueryBundle
query_bundle = QueryBundle(
query_str="What did the author do growing up?",
custom_embedding_strs=["The author grew up painting."],
)
query_engine = index.as_query_engine()
response = query_engine.query(query_bundle)
display(Markdown(f"<b>{response}</b>"))
query_engine = index.as_query_engine(
vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.2}
)
response = query_engine.query("What did the author do growing up?")
print(response.get_formatted_sources())
from llama_index.core import Document
doc = | Document(text="target", metadata={"tag": "target"}) | llama_index.core.Document |
import os
os.environ["OPENAI_API_KEY"] = "YOUR OPENAI API KEY"
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2312.04511.pdf" -O "llm_compiler.pdf"')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2312.06648.pdf" -O "dense_x_retrieval.pdf"')
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader(input_files=["dense_x_retrieval.pdf"])
dense_x_retrieval_docs = reader.load_data()
reader = | SimpleDirectoryReader(input_files=["llm_compiler.pdf"]) | llama_index.core.SimpleDirectoryReader |
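# Hedged sketch: mirroring the dense_x load above for the second paper.
llm_compiler_docs = reader.load_data()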
get_ipython().system('pip install llama-index')
get_ipython().system('pip install duckdb')
get_ipython().system('pip install llama-index-vector-stores-duckdb')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.duckdb import DuckDBVectorStore
from llama_index.core import StorageContext
from IPython.display import Markdown, display
import os
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("data/paul_graham/").load_data()
vector_store = DuckDBVectorStore()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
documents = SimpleDirectoryReader("data/paul_graham/").load_data()
vector_store = DuckDBVectorStore("pg.duckdb", persist_dir="./persist/")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
vector_store = | DuckDBVectorStore.from_local("./persist/pg.duckdb") | llama_index.vector_stores.duckdb.DuckDBVectorStore.from_local |
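# Hedged sketch: rebuilding an index on top of the re-opened store and
# repeating the demo query against it.
index = VectorStoreIndex.from_vector_store(vector_store)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))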
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools.database.base import DatabaseToolSpec
db_spec = DatabaseToolSpec(
scheme="postgresql", # Database Scheme
host="localhost", # Database Host
port="5432", # Database Port
user="postgres", # Database User
password="x", # Database Password
dbname="your_db", # Database Name
)
tools = db_spec.to_tool_list()
for tool in tools:
print(tool.metadata.name)
print(tool.metadata.description)
print(tool.metadata.fn_schema)
agent = | OpenAIAgent.from_tools(tools, verbose=True) | llama_index.agent.OpenAIAgent.from_tools |
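# Hedged sketch: the question is illustrative; the agent chooses among the
# database tools printed above to answer it.
print(agent.chat("What tables does this database contain?"))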
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.2)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("../data/paul_graham").load_data()
from llama_index.core import Settings
Settings.chunk_size = 1024
nodes = Settings.node_parser.get_nodes_from_documents(documents)
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
from llama_index.core import SummaryIndex
from llama_index.core import VectorStoreIndex
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
list_query_engine = summary_index.as_query_engine(
response_mode="tree_summarize",
use_async=True,
)
vector_query_engine = vector_index.as_query_engine()
from llama_index.core.tools import QueryEngineTool
list_tool = QueryEngineTool.from_defaults(
query_engine=list_query_engine,
description=(
"Useful for summarization questions related to Paul Graham eassy on"
" What I Worked On."
),
)
vector_tool = QueryEngineTool.from_defaults(
query_engine=vector_query_engine,
description=(
"Useful for retrieving specific context from Paul Graham essay on What"
" I Worked On."
),
)
from llama_index.core.query_engine import RouterQueryEngine
from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector
from llama_index.core.selectors import (
PydanticMultiSelector,
PydanticSingleSelector,
)
query_engine = RouterQueryEngine(
selector=PydanticSingleSelector.from_defaults(),
query_engine_tools=[
list_tool,
vector_tool,
],
)
response = query_engine.query("What is the summary of the document?")
print(str(response))
response = query_engine.query("What did Paul Graham do after RICS?")
print(str(response))
query_engine = RouterQueryEngine(
selector= | LLMSingleSelector.from_defaults() | llama_index.core.selectors.LLMSingleSelector.from_defaults |
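# Hedged sketch: a complete version of the truncated constructor above,
# swapping the function-calling selector for the text-completion one, plus a
# repeat of the summary query.
query_engine = RouterQueryEngine(
    selector=LLMSingleSelector.from_defaults(),
    query_engine_tools=[
        list_tool,
        vector_tool,
    ],
)
response = query_engine.query("What is the summary of the document?")
print(str(response))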
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-bagel')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.bagel import BagelVectorStore
from llama_index.core import StorageContext
from IPython.display import Markdown, display
import bagel
from bagel import Settings
import os
import getpass
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
server_settings = Settings(
bagel_api_impl="rest", bagel_server_host="api.bageldb.ai"
)
client = bagel.Client(server_settings)
collection = client.get_or_create_cluster("testing_embeddings")
embed_model = "local:BAAI/bge-small-en-v1.5"
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = | BagelVectorStore(collection=collection) | llama_index.vector_stores.bagel.BagelVectorStore |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import nest_asyncio
nest_asyncio.apply()
from llama_index.core import SimpleDirectoryReader, get_response_synthesizer
from llama_index.core import DocumentSummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.core.node_parser import SentenceSplitter
wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston"]
from pathlib import Path
import requests
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
data_path = Path("data")
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
city_docs = []
for wiki_title in wiki_titles:
docs = SimpleDirectoryReader(
input_files=[f"data/{wiki_title}.txt"]
).load_data()
docs[0].doc_id = wiki_title
city_docs.extend(docs)
chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo")
splitter = SentenceSplitter(chunk_size=1024)
response_synthesizer = get_response_synthesizer(
response_mode="tree_summarize", use_async=True
)
doc_summary_index = DocumentSummaryIndex.from_documents(
city_docs,
llm=chatgpt,
transformations=[splitter],
response_synthesizer=response_synthesizer,
show_progress=True,
)
doc_summary_index.get_document_summary("Boston")
doc_summary_index.storage_context.persist("index")
from llama_index.core import load_index_from_storage
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults(persist_dir="index")
doc_summary_index = load_index_from_storage(storage_context)
query_engine = doc_summary_index.as_query_engine(
response_mode="tree_summarize", use_async=True
)
response = query_engine.query("What are the sports teams in Toronto?")
print(response)
from llama_index.core.indices.document_summary import (
DocumentSummaryIndexLLMRetriever,
)
retriever = DocumentSummaryIndexLLMRetriever(
doc_summary_index,
)
retrieved_nodes = retriever.retrieve("What are the sports teams in Toronto?")
print(len(retrieved_nodes))
print(retrieved_nodes[0].score)
print(retrieved_nodes[0].node.get_text())
from llama_index.core.query_engine import RetrieverQueryEngine
response_synthesizer = | get_response_synthesizer(response_mode="tree_summarize") | llama_index.core.get_response_synthesizer |
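# Hedged sketch: wiring the LLM-based summary retriever into a query engine
# with the tree-summarize synthesizer defined above.
query_engine = RetrieverQueryEngine(
    retriever=retriever,
    response_synthesizer=response_synthesizer,
)
response = query_engine.query("What are the sports teams in Toronto?")
print(response)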
get_ipython().run_line_magic('pip', 'install llama-index-readers-google')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SummaryIndex
from llama_index.readers.google import GoogleDocsReader
from IPython.display import Markdown, display
import os
document_ids = ["<document_id>"]
documents = | GoogleDocsReader() | llama_index.readers.google.GoogleDocsReader |
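# Hedged sketch: the reader call above is cut off at the row boundary; loading
# and summarizing typically continue like this. The query placeholder follows
# the row's own <document_id> convention.
documents = GoogleDocsReader().load_data(document_ids=document_ids)
index = SummaryIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("<query_text>")
display(Markdown(f"<b>{response}</b>"))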
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex
from llama_index.core import PromptTemplate
from IPython.display import Markdown, display
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = | PyMuPDFReader() | llama_index.readers.file.PyMuPDFReader |
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
Settings.callback_manager = callback_manager
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.tools import QueryEngineTool
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["albums", "tracks", "artists"],
verbose=True,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query"
),
)
from llama_index.core.query_pipeline import QueryPipeline as QP
qp = QP(verbose=True)
from llama_index.core.agent.react.types import (
ActionReasoningStep,
ObservationReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.agent import Task, AgentChatResponse
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
CustomAgentComponent,
QueryComponent,
ToolRunnerComponent,
)
from llama_index.core.llms import MessageRole
from typing import Dict, Any, Optional, Tuple, List, cast
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]:
"""Agent input function.
Returns:
A Dictionary of output keys and values. If you are specifying
src_key when defining links between this component and other
components, make sure the src_key matches the specified output_key.
"""
if "current_reasoning" not in state:
state["current_reasoning"] = []
reasoning_step = | ObservationReasoningStep(observation=task.input) | llama_index.core.agent.react.types.ObservationReasoningStep |
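# Hedged sketch: the function body above is cut off at the row boundary; the
# remaining lines typically record the observation and forward the raw input.
#     state["current_reasoning"].append(reasoning_step)
#     return {"input": task.input}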
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface')
import nest_asyncio
nest_asyncio.apply()
import os
HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
get_ipython().system('pip install wikipedia -q')
from llama_index.readers.wikipedia import WikipediaReader
cities = [
"San Francisco",
"Toronto",
"New York",
"Vancouver",
"Montreal",
"Tokyo",
"Singapore",
"Paris",
]
documents = | WikipediaReader() | llama_index.readers.wikipedia.WikipediaReader |
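# Hedged sketch: the reader call above is truncated; loading the listed city
# pages usually looks like this.
documents = WikipediaReader().load_data(pages=cities)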
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-supabase')
get_ipython().system('pip install llama-index')
import logging
import sys
from llama_index.core import SimpleDirectoryReader, Document, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.supabase import SupabaseVectorStore
import textwrap
import os
os.environ["OPENAI_API_KEY"] = "[your_openai_api_key]"
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(
"Document ID:",
documents[0].doc_id,
"Document Hash:",
    documents[0].hash,
)
vector_store = | SupabaseVectorStore(
postgres_connection_string=(
"postgresql://<user>:<password>@<host>:<port>/<db_name>"
) | llama_index.vector_stores.supabase.SupabaseVectorStore |
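# Hedged sketch: a complete version of the truncated constructor above. The
# collection_name is an assumption; the index build mirrors the other
# vector-store rows in this file.
vector_store = SupabaseVectorStore(
    postgres_connection_string=(
        "postgresql://<user>:<password>@<host>:<port>/<db_name>"
    ),
    collection_name="base_demo",
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)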
get_ipython().system('pip install llama-index')
import logging
import sys
from IPython.display import Markdown, display
import pandas as pd
from llama_index.core.query_engine import PandasQueryEngine
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
df = pd.DataFrame(
{
"city": ["Toronto", "Tokyo", "Berlin"],
"population": [2930000, 13960000, 3645000],
}
)
query_engine = PandasQueryEngine(df=df, verbose=True)
response = query_engine.query(
"What is the city with the highest population?",
)
display(Markdown(f"<b>{response}</b>"))
print(response.metadata["pandas_instruction_str"])
query_engine = | PandasQueryEngine(df=df, verbose=True, synthesize_response=True) | llama_index.core.query_engine.PandasQueryEngine |
import os
os.environ["OPENAI_API_KEY"] = "YOUR OPENAI API KEY"
from llama_index.llms.openai import OpenAI
llm = OpenAI("gpt-4")
from llama_index.core.llama_pack import download_llama_pack
SelfDiscoverPack = | download_llama_pack("SelfDiscoverPack", "./self_discover_pack") | llama_index.core.llama_pack.download_llama_pack |
get_ipython().system('pip install exa_py')
import os
import openai
from llama_index.agent import OpenAIAgent
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_hub.tools.exa.base import ExaToolSpec
exa_tool = ExaToolSpec(
api_key=os.environ["EXA_API_KEY"],
)
exa_tool_list = exa_tool.to_tool_list()
for tool in exa_tool_list:
print(tool.metadata.name)
exa_tool.search_and_retrieve_documents("machine learning transformers", num_results=3)
exa_tool.find_similar(
"https://www.mihaileric.com/posts/transformers-attention-in-disguise/"
)
exa_tool.search_and_retrieve_documents(
"This is a summary of recent research around diffusion models:", num_results=1
)
exa_tool.search_and_retrieve_highlights(
"This is a summary of recent research around diffusion models:", num_results=1
)
agent = OpenAIAgent.from_tools(
exa_tool_list,
verbose=True,
)
print(agent.chat("What are the best resturants in toronto?"))
print(agent.chat("tell me more about Osteria Giulia"))
tools = exa_tool.to_tool_list(
spec_functions=["search_and_retrieve_highlights", "current_date"]
)
agent = OpenAIAgent.from_tools(
tools,
verbose=True,
)
response = agent.chat("Tell me more about the recent news on semiconductors")
print(f"Response: {str(response)}")
from llama_index.tools.tool_spec.load_and_search.base import LoadAndSearchToolSpec
search_and_retrieve_docs_tool = exa_tool.to_tool_list(
spec_functions=["search_and_retrieve_documents"]
)[0]
date_tool = exa_tool.to_tool_list(spec_functions=["current_date"])[0]
wrapped_retrieve = | LoadAndSearchToolSpec.from_defaults(search_and_retrieve_docs_tool) | llama_index.tools.tool_spec.load_and_search.base.LoadAndSearchToolSpec.from_defaults |
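# Hedged sketch: the wrapped spec splits into separate load and search tools;
# handing both (plus the date tool) to an agent is the usual finish. The
# question is illustrative.
agent = OpenAIAgent.from_tools(
    [*wrapped_retrieve.to_tool_list(), date_tool],
    verbose=True,
)
print(agent.chat("What new semiconductor research came out this month?"))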
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-you')
from llama_index.retrievers.you import YouRetriever
you_api_key = "" or os.environ["YOU_API_KEY"]
retriever = | YouRetriever(api_key=you_api_key) | llama_index.retrievers.you.YouRetriever |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-typesense')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from IPython.display import Markdown, display
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.vector_stores.typesense import TypesenseVectorStore
from typesense import Client
typesense_client = Client(
{
"api_key": "xyz",
"nodes": [{"host": "localhost", "port": "8108", "protocol": "http"}],
"connection_timeout_seconds": 2,
}
)
typesense_vector_store = TypesenseVectorStore(typesense_client)
storage_context = StorageContext.from_defaults(
vector_store=typesense_vector_store
)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
from llama_index.core import QueryBundle
from llama_index.embeddings.openai import OpenAIEmbedding
query_str = "What did the author do growing up?"
embed_model = OpenAIEmbedding()
from llama_index.core import Settings
query_embedding = embed_model.get_agg_embedding_from_queries([query_str])
query_bundle = QueryBundle(query_str, embedding=query_embedding)
response = index.as_query_engine().query(query_bundle)
display(Markdown(f"<b>{response}</b>"))
from llama_index.core.vector_stores.types import VectorStoreQueryMode
query_bundle = | QueryBundle(query_str=query_str) | llama_index.core.QueryBundle |
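# Hedged sketch: without a precomputed embedding, the same Typesense-backed
# index can serve plain text search by switching the query mode.
response = index.as_query_engine(
    vector_store_query_mode=VectorStoreQueryMode.TEXT_SEARCH
).query(query_bundle)
display(Markdown(f"<b>{response}</b>"))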
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools.bing_search.base import BingSearchToolSpec
bing_tool = | BingSearchToolSpec(api_key="your-key") | llama_index.tools.bing_search.base.BingSearchToolSpec |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader("../data/paul_graham")
docs = reader.load_data()
import os
from llama_index.core import (
StorageContext,
VectorStoreIndex,
load_index_from_storage,
)
if not os.path.exists("storage"):
index = VectorStoreIndex.from_documents(docs)
index.set_index_id("vector_index")
index.storage_context.persist("./storage")
else:
storage_context = StorageContext.from_defaults(persist_dir="storage")
index = load_index_from_storage(storage_context, index_id="vector_index")
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core import PromptTemplate
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
p = QueryPipeline(chain=[prompt_tmpl, llm], verbose=True)
output = p.run(movie_name="The Departed")
print(str(output))
from typing import List
from pydantic import BaseModel, Field
from llama_index.core.output_parsers import PydanticOutputParser
class Movie(BaseModel):
"""Object representing a single movie."""
name: str = Field(..., description="Name of the movie.")
year: int = Field(..., description="Year of the movie.")
class Movies(BaseModel):
"""Object representing a list of movies."""
movies: List[Movie] = Field(..., description="List of movies.")
llm = OpenAI(model="gpt-3.5-turbo")
output_parser = PydanticOutputParser(Movies)
json_prompt_str = """\
Please generate related movies to {movie_name}. Output with the following JSON format:
"""
json_prompt_str = output_parser.format(json_prompt_str)
json_prompt_tmpl = PromptTemplate(json_prompt_str)
p = QueryPipeline(chain=[json_prompt_tmpl, llm, output_parser], verbose=True)
output = p.run(movie_name="Toy Story")
output
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
prompt_str2 = """\
Here's some text:
{text}
Can you rewrite this with a summary of each movie?
"""
prompt_tmpl2 = PromptTemplate(prompt_str2)
llm = OpenAI(model="gpt-3.5-turbo")
llm_c = llm.as_query_component(streaming=True)
p = QueryPipeline(
chain=[prompt_tmpl, llm_c, prompt_tmpl2, llm_c], verbose=True
)
output = p.run(movie_name="The Dark Knight")
for o in output:
print(o.delta, end="")
p = QueryPipeline(
chain=[
json_prompt_tmpl,
llm.as_query_component(streaming=True),
output_parser,
],
verbose=True,
)
output = p.run(movie_name="Toy Story")
print(output)
from llama_index.postprocessor.cohere_rerank import CohereRerank
prompt_str1 = "Please generate a concise question about Paul Graham's life regarding the following topic {topic}"
prompt_tmpl1 = PromptTemplate(prompt_str1)
prompt_str2 = (
"Please write a passage to answer the question\n"
"Try to include as many key details as possible.\n"
"\n"
"\n"
"{query_str}\n"
"\n"
"\n"
'Passage:"""\n'
)
prompt_tmpl2 = PromptTemplate(prompt_str2)
llm = OpenAI(model="gpt-3.5-turbo")
retriever = index.as_retriever(similarity_top_k=5)
p = QueryPipeline(
chain=[prompt_tmpl1, llm, prompt_tmpl2, llm, retriever], verbose=True
)
nodes = p.run(topic="college")
len(nodes)
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.response_synthesizers import TreeSummarize
prompt_str = "Please generate a question about Paul Graham's life regarding the following topic {topic}"
prompt_tmpl = PromptTemplate(prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
retriever = index.as_retriever(similarity_top_k=3)
reranker = CohereRerank()
summarizer = TreeSummarize(llm=llm)
p = | QueryPipeline(verbose=True) | llama_index.core.query_pipeline.QueryPipeline |
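# Hedged sketch: with no single chain, modules and links are declared
# explicitly; this wiring (generate a question, retrieve, rerank, then
# summarize) follows the usual pattern for this demo, and the topic is
# illustrative.
p.add_modules(
    {
        "llm": llm,
        "prompt_tmpl": prompt_tmpl,
        "retriever": retriever,
        "reranker": reranker,
        "summarizer": summarizer,
    }
)
p.add_link("prompt_tmpl", "llm")
p.add_link("llm", "retriever")
p.add_link("retriever", "reranker", dest_key="nodes")
p.add_link("llm", "reranker", dest_key="query_str")
p.add_link("reranker", "summarizer", dest_key="nodes")
p.add_link("llm", "summarizer", dest_key="query_str")
response = p.run(topic="YC")
print(str(response))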
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-colbert')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install torch sentence-transformers')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.indices.managed.google import GoogleIndex
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
project_name = "TODO-your-project-name" # @param {type:"string"}
email = "[email protected]" # @param {type:"string"}
client_file_name = "client_secret.json"
get_ipython().system('gcloud config set project $project_name')
get_ipython().system('gcloud config set account $email')
get_ipython().system('gcloud auth application-default login --no-browser --client-id-file=$client_file_name --scopes="https://www.googleapis.com/auth/generative-language.retriever,https://www.googleapis.com/auth/cloud-platform"')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
GOOGLE_API_KEY = "" # add your GOOGLE API key here
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from llama_index.core import SimpleDirectoryReader
from llama_index.indices.managed.google import GoogleIndex
google_index = GoogleIndex.create_corpus(display_name="My first corpus!")
print(f"Newly created corpus ID is {google_index.corpus_id}.")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
google_index.insert_documents(documents)
google_index = | GoogleIndex.from_corpus(corpus_id="") | llama_index.indices.managed.google.GoogleIndex.from_corpus |
get_ipython().run_line_magic('pip', 'install llama-index-readers-deeplake')
get_ipython().system('pip install llama-index')
import getpass
import os
import random
import textwrap
from llama_index.core import VectorStoreIndex
from llama_index.readers.deeplake import DeepLakeReader
os.environ["OPENAI_API_KEY"] = getpass.getpass("open ai api key: ")
reader = | DeepLakeReader() | llama_index.readers.deeplake.DeepLakeReader |
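# Hedged sketch: DeepLakeReader retrieves by vector similarity, which is why
# random was imported above; the dataset path and limit are illustrative demo
# values, not guaranteed by this row.
query_vector = [random.random() for _ in range(1536)]
documents = reader.load_data(
    query_vector=query_vector,
    dataset_path="hub://activeloop/paul_graham_essay",
    limit=5,
)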
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools.text_to_image.base import TextToImageToolSpec
text_to_image_spec = | TextToImageToolSpec() | llama_index.tools.text_to_image.base.TextToImageToolSpec |
get_ipython().run_line_magic('pip', 'install llama-index-llms-rungpt')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install rungpt')
get_ipython().system('rungpt serve decapoda-research/llama-7b-hf --precision fp16 --device_map balanced')
from llama_index.llms.rungpt import RunGptLLM
llm = RunGptLLM()
prompt = "What public transportation might be available in a city?"
response = llm.complete(prompt)
print(response)
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.llms.rungpt import RunGptLLM
messages = [
ChatMessage(
role=MessageRole.USER,
content="Now, I want you to do some math for me.",
),
ChatMessage(
role=MessageRole.ASSISTANT, content="Sure, I would like to help you."
),
ChatMessage(
role=MessageRole.USER,
content="How many points determine a straight line?",
),
]
llm = RunGptLLM()
response = llm.chat(messages=messages, temperature=0.8, max_tokens=15)
print(response)
prompt = "What public transportation might be available in a city?"
response = | RunGptLLM() | llama_index.llms.rungpt.RunGptLLM |
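# Hedged sketch: the call above is cut off at the row boundary; a streaming
# completion over the same prompt typically looks like this.
response = RunGptLLM().stream_complete(prompt)
for item in response:
    print(item.text)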
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere')
get_ipython().system('pip install llama-index')
from llama_index.llms.cohere import Cohere
api_key = "Your api key"
resp = Cohere(api_key=api_key).complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.cohere import Cohere
messages = [
ChatMessage(role="user", content="hello there"),
ChatMessage(
role="assistant", content="Arrrr, matey! How can I help ye today?"
),
ChatMessage(role="user", content="What is your name"),
]
resp = Cohere(api_key=api_key).chat(
messages, preamble_override="You are a pirate with a colorful personality"
)
print(resp)
from llama_index.llms.openai import OpenAI
llm = | Cohere(api_key=api_key) | llama_index.llms.cohere.Cohere |
get_ipython().run_line_magic('pip', 'install llama-index-llms-clarifai')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install clarifai')
import os
os.environ["CLARIFAI_PAT"] = "<YOUR CLARIFAI PAT>"
from llama_index.llms.clarifai import Clarifai
params = dict(
user_id="clarifai",
app_id="ml",
model_name="llama2-7b-alternative-4k",
model_url=(
"https://clarifai.com/clarifai/ml/models/llama2-7b-alternative-4k"
),
)
llm_model = | Clarifai(model_url=params["model_url"]) | llama_index.llms.clarifai.Clarifai |
get_ipython().system('pip install llama-index-llms-dashscope')
get_ipython().run_line_magic('env', 'DASHSCOPE_API_KEY=YOUR_DASHSCOPE_API_KEY')
import os
os.environ["DASHSCOPE_API_KEY"] = "YOUR_DASHSCOPE_API_KEY"
from llama_index.llms.dashscope import DashScope, DashScopeGenerationModels
dashscope_llm = DashScope(model_name=DashScopeGenerationModels.QWEN_MAX)
resp = dashscope_llm.complete("How to make cake?")
print(resp)
responses = dashscope_llm.stream_complete("How to make cake?")
for response in responses:
print(response.delta, end="")
from llama_index.core.base.llms.types import MessageRole, ChatMessage
messages = [
ChatMessage(
role=MessageRole.SYSTEM, content="You are a helpful assistant."
),
| ChatMessage(role=MessageRole.USER, content="How to make cake?") | llama_index.core.base.llms.types.ChatMessage |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
from llama_index.core import set_global_handler
set_global_handler("wandb", run_args={"project": "llamaindex"})
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.llms.openai import OpenAI
from llama_index.core.schema import MetadataMode
llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo", max_tokens=512)
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.core.extractors import (
SummaryExtractor,
QuestionsAnsweredExtractor,
)
node_parser = TokenTextSplitter(
separator=" ", chunk_size=256, chunk_overlap=128
)
extractors_1 = [
QuestionsAnsweredExtractor(
questions=3, llm=llm, metadata_mode=MetadataMode.EMBED
),
]
extractors_2 = [
SummaryExtractor(summaries=["prev", "self", "next"], llm=llm),
QuestionsAnsweredExtractor(
questions=3, llm=llm, metadata_mode=MetadataMode.EMBED
),
]
from llama_index.core import SimpleDirectoryReader
from llama_index.readers.web import SimpleWebPageReader
reader = SimpleWebPageReader(html_to_text=True)
docs = reader.load_data(urls=["https://eugeneyan.com/writing/llm-patterns/"])
print(docs[0].get_content())
orig_nodes = node_parser.get_nodes_from_documents(docs)
nodes = orig_nodes[20:28]
print(nodes[3].get_content(metadata_mode="all"))
from llama_index.core.ingestion import IngestionPipeline
pipeline = IngestionPipeline(transformations=[node_parser, *extractors_1])
nodes_1 = pipeline.run(nodes=nodes, in_place=False, show_progress=True)
print(nodes_1[3].get_content(metadata_mode="all"))
pipeline = | IngestionPipeline(transformations=[node_parser, *extractors_2]) | llama_index.core.ingestion.IngestionPipeline |
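# Hedged sketch: running the second pipeline the same way as the first so the
# summary-augmented metadata can be compared side by side.
nodes_2 = pipeline.run(nodes=nodes, in_place=False, show_progress=True)
print(nodes_2[3].get_content(metadata_mode="all"))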
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-redis')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('pip install redis')
get_ipython().system('docker run -d --name redis-stack -p 6379:6379 -p 8001:8001 redis/redis-stack:latest')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
get_ipython().system('rm -rf test_redis_data')
get_ipython().system('mkdir -p test_redis_data')
get_ipython().system('echo "This is a test file: one!" > test_redis_data/test1.txt')
get_ipython().system('echo "This is a test file: two!" > test_redis_data/test2.txt')
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader(
"./test_redis_data", filename_as_id=True
).load_data()
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.ingestion import (
DocstoreStrategy,
IngestionPipeline,
IngestionCache,
)
from llama_index.core.ingestion.cache import RedisCache
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.core.node_parser import SentenceSplitter
from llama_index.vector_stores.redis import RedisVectorStore
embed_model = | HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5") | llama_index.embeddings.huggingface.HuggingFaceEmbedding |
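# Hedged sketch: wiring the embed model into an ingestion pipeline backed by
# Redis for the docstore and cache; hosts, ports, and names are illustrative,
# and the RedisVectorStore wiring is omitted here because its constructor
# varies across versions.
pipeline = IngestionPipeline(
    transformations=[SentenceSplitter(), embed_model],
    docstore=RedisDocumentStore.from_host_and_port(
        "localhost", 6379, namespace="document_store"
    ),
    cache=IngestionCache(
        cache=RedisCache.from_host_and_port("localhost", 6379),
        collection="redis_cache",
    ),
    docstore_strategy=DocstoreStrategy.UPSERTS,
)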
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().system('pip install llama-index')
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
pprint_response,
)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents=documents)
import os
from llama_index.postprocessor.cohere_rerank import CohereRerank
api_key = os.environ["COHERE_API_KEY"]
cohere_rerank = | CohereRerank(api_key=api_key, top_n=2) | llama_index.postprocessor.cohere_rerank.CohereRerank |
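# Hedged sketch: attaching the reranker as a postprocessor so ten retrieved
# chunks are reranked down to the two most relevant before synthesis; the
# query is illustrative.
query_engine = index.as_query_engine(
    similarity_top_k=10,
    node_postprocessors=[cohere_rerank],
)
response = query_engine.query("What did Sam Altman do in this essay?")
pprint_response(response, show_source=True)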
get_ipython().system('pip install llama-index-llms-dashscope')
get_ipython().run_line_magic('env', 'DASHSCOPE_API_KEY=YOUR_DASHSCOPE_API_KEY')
import os
os.environ["DASHSCOPE_API_KEY"] = "YOUR_DASHSCOPE_API_KEY"
from llama_index.llms.dashscope import DashScope, DashScopeGenerationModels
dashscope_llm = DashScope(model_name=DashScopeGenerationModels.QWEN_MAX)
resp = dashscope_llm.complete("How to make cake?")
print(resp)
responses = dashscope_llm.stream_complete("How to make cake?")
for response in responses:
print(response.delta, end="")
from llama_index.core.base.llms.types import MessageRole, ChatMessage
messages = [
ChatMessage(
role=MessageRole.SYSTEM, content="You are a helpful assistant."
),
ChatMessage(role=MessageRole.USER, content="How to make cake?"),
]
resp = dashscope_llm.chat(messages)
print(resp)
responses = dashscope_llm.stream_chat(messages)
for response in responses:
print(response.delta, end="")
messages = [
ChatMessage(
role=MessageRole.SYSTEM, content="You are a helpful assistant."
),
ChatMessage(role=MessageRole.USER, content="How to make cake?"),
]
resp = dashscope_llm.chat(messages)
print(resp)
messages.append(
| ChatMessage(role=MessageRole.ASSISTANT, content=resp.message.content) | llama_index.core.base.llms.types.ChatMessage |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openllm')
get_ipython().system('pip install "openllm" # use \'openllm[vllm]\' if you have access to GPU')
get_ipython().system('pip install llama-index')
import os
from typing import List, Optional
from llama_index.llms.openllm import OpenLLM, OpenLLMAPI
from llama_index.core.llms import ChatMessage
os.environ[
"OPENLLM_ENDPOINT"
] = "na" # Change this to a remote server that you might run OpenLLM at.
local_llm = OpenLLM("HuggingFaceH4/zephyr-7b-alpha")
remote_llm = | OpenLLMAPI(address="http://localhost:3000") | llama_index.llms.openllm.OpenLLMAPI |
from llama_hub.semanticscholar.base import SemanticScholarReader
import os
import openai
from llama_index.llms import OpenAI
from llama_index.query_engine import CitationQueryEngine
from llama_index import (
VectorStoreIndex,
StorageContext,
load_index_from_storage,
ServiceContext,
)
from llama_index.response.notebook_utils import display_response
s2reader = SemanticScholarReader()
openai.api_key = os.environ["OPENAI_API_KEY"]
service_context = ServiceContext.from_defaults(
llm=OpenAI(model="gpt-3.5-turbo", temperature=0)
)
query_space = "large language models"
full_text = True
total_papers = 50
persist_dir = (
"./citation_" + query_space + "_" + str(total_papers) + "_" + str(full_text)
)
if not os.path.exists(persist_dir):
documents = s2reader.load_data(query_space, total_papers, full_text=full_text)
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
index.storage_context.persist(persist_dir=persist_dir)
else:
index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=persist_dir),
service_context=service_context,
)
query_engine = CitationQueryEngine.from_args(
index,
similarity_top_k=3,
citation_chunk_size=512,
)
query_string = "limitations of using large language models"
response = query_engine.query(query_string)
display_response(
response, show_source=True, source_length=100, show_source_metadata=True
)
query_space = "covid 19 vaccine"
query_string = "List the efficacy numbers of the covid 19 vaccines"
full_text = True
total_papers = 50
persist_dir = (
"./citation_" + query_space + "_" + str(total_papers) + "_" + str(full_text)
)
if not os.path.exists(persist_dir):
documents = s2reader.load_data(query_space, total_papers, full_text=full_text)
index = | VectorStoreIndex.from_documents(documents, service_context=service_context) | llama_index.VectorStoreIndex.from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
get_ipython().system('pip install llama-index')
from llama_index.core import SummaryIndex
from llama_index.readers.web import SimpleWebPageReader
from IPython.display import Markdown, display
import os
documents = | SimpleWebPageReader(html_to_text=True) | llama_index.readers.web.SimpleWebPageReader |
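# Hedged sketch: the reader call above is truncated; loading a page and
# summarizing it usually continues like this, with the Paul Graham essay URL
# used elsewhere in this file as an illustrative target.
documents = SimpleWebPageReader(html_to_text=True).load_data(
    ["http://paulgraham.com/worked.html"]
)
index = SummaryIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))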
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm')
get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git')
get_ipython().run_line_magic('pip', 'install torch torchvision')
get_ipython().run_line_magic('pip', 'install matplotlib scikit-image')
get_ipython().run_line_magic('pip', 'install -U qdrant_client')
import os
OPENAI_API_TOKEN = ""
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
from pathlib import Path
input_image_path = Path("input_images")
if not input_image_path.exists():
Path.mkdir(input_image_path)
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1nUhsBRiSWxcVQv8t8Cvvro8HJZ88LCzj" -O ./input_images/long_range_spec.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=19pLwx0nVqsop7lo0ubUSYTzQfMtKJJtJ" -O ./input_images/model_y.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1utu3iD9XEgR5Sb7PrbtMf1qw8T1WdNmF" -O ./input_images/performance_spec.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1dpUakWMqaXR4Jjn1kHuZfB0pAXvjn2-i" -O ./input_images/price.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1qNeT201QAesnAP5va1ty0Ky5Q_jKkguV" -O ./input_images/real_wheel_spec.png')
from PIL import Image
import matplotlib.pyplot as plt
import os
image_paths = []
for img_path in os.listdir("./input_images"):
image_paths.append(str(os.path.join("./input_images", img_path)))
def plot_images(image_paths):
images_shown = 0
plt.figure(figsize=(16, 9))
for img_path in image_paths:
if os.path.isfile(img_path):
image = Image.open(img_path)
plt.subplot(2, 3, images_shown + 1)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
images_shown += 1
            if images_shown >= 6:  # the 2x3 grid above holds at most six images
break
plot_images(image_paths)
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core import SimpleDirectoryReader
image_documents = SimpleDirectoryReader("./input_images").load_data()
openai_mm_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=1500
)
response_1 = openai_mm_llm.complete(
prompt="Describe the images as an alternative text",
image_documents=image_documents,
)
print(response_1)
response_2 = openai_mm_llm.complete(
prompt="Can you tell me what is the price with each spec?",
image_documents=image_documents,
)
print(response_2)
import requests
def get_wikipedia_images(title):
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "imageinfo",
"iiprop": "url|dimensions|mime",
"generator": "images",
"gimlimit": "50",
},
).json()
image_urls = []
for page in response["query"]["pages"].values():
if page["imageinfo"][0]["url"].endswith(".jpg") or page["imageinfo"][
0
]["url"].endswith(".png"):
image_urls.append(page["imageinfo"][0]["url"])
return image_urls
from pathlib import Path
import requests
import urllib.request
image_uuid = 0
image_metadata_dict = {}
MAX_IMAGES_PER_WIKI = 20
wiki_titles = {
"Tesla Model Y",
"Tesla Model X",
"Tesla Model 3",
"Tesla Model S",
"Kia EV6",
"BMW i3",
"Audi e-tron",
"Ford Mustang",
"Porsche Taycan",
"Rivian",
"Polestar",
}
data_path = Path("mixed_wiki")
if not data_path.exists():
Path.mkdir(data_path)
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
images_per_wiki = 0
try:
list_img_urls = get_wikipedia_images(title)
for url in list_img_urls:
if (
url.endswith(".jpg")
or url.endswith(".png")
or url.endswith(".svg")
):
image_uuid += 1
urllib.request.urlretrieve(
url, data_path / f"{image_uuid}.jpg"
)
images_per_wiki += 1
                if images_per_wiki >= MAX_IMAGES_PER_WIKI:
break
    except Exception:
        print(f"No images found for Wikipedia page: {title}")
        continue
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O ./mixed_wiki/tesla_2021_10k.htm')
from llama_index.core.indices import MultiModalVectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import SimpleDirectoryReader, StorageContext
import qdrant_client
client = qdrant_client.QdrantClient(path="qdrant_mm_db")
text_store = QdrantVectorStore(
client=client, collection_name="text_collection"
)
image_store = QdrantVectorStore(
client=client, collection_name="image_collection"
)
storage_context = StorageContext.from_defaults(
vector_store=text_store, image_store=image_store
)
documents = SimpleDirectoryReader("./mixed_wiki/").load_data()
index = MultiModalVectorStoreIndex.from_documents(
documents,
storage_context=storage_context,
)
from llama_index.core import load_index_from_storage
print(response_2.text)
MAX_TOKENS = 50
retriever_engine = index.as_retriever(
similarity_top_k=3, image_similarity_top_k=3
)
retrieval_results = retriever_engine.retrieve(response_2.text[:MAX_TOKENS])
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.schema import ImageNode
retrieved_image = []
for res_node in retrieval_results:
if isinstance(res_node.node, ImageNode):
retrieved_image.append(res_node.node.metadata["file_path"])
else:
display_source_node(res_node, source_length=200)
plot_images(retrieved_image)
response_3 = openai_mm_llm.complete(
prompt="what are other similar cars?",
image_documents=image_documents,
)
print(response_3)
from llama_index.core import PromptTemplate
from llama_index.core.query_engine import SimpleMultiModalQueryEngine
qa_tmpl_str = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query.\n"
"Query: {query_str}\n"
"Answer: "
)
qa_tmpl = PromptTemplate(qa_tmpl_str)
query_engine = index.as_query_engine(
multi_modal_llm=openai_mm_llm, text_qa_template=qa_tmpl
)
query_str = "Tell me more about the Porsche"
response = query_engine.query(query_str)
print(str(response))
from llama_index.core.response.notebook_utils import display_source_node
for text_node in response.metadata["text_nodes"]:
| display_source_node(text_node, source_length=200) | llama_index.core.response.notebook_utils.display_source_node |
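# Illustrative follow-up sketch (not from the source row above): the multi-modal
# query engine also reports which images it used under the "image_nodes"
# metadata key, so they can be re-plotted with plot_images.
retrieved_images = [
    n.metadata["file_path"] for n in response.metadata["image_nodes"]
]
plot_images(retrieved_images)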
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.postprocessor import LLMRerank
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core import Settings
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.packs.koda_retriever import KodaRetriever
import os
from pinecone import Pinecone
pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY"))
index = pc.Index("sample-movies")
Settings.llm = OpenAI()
Settings.embed_model = OpenAIEmbedding()
vector_store = PineconeVectorStore(pinecone_index=index, text_key="summary")
vector_index = VectorStoreIndex.from_vector_store(
vector_store=vector_store, embed_model=Settings.embed_model
)
reranker = | LLMRerank(llm=Settings.llm) | llama_index.core.postprocessor.LLMRerank |
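# Hedged sketch of KodaRetriever assembly; the argument names follow the
# pack's README and are an assumption here, not taken from this file.
retriever = KodaRetriever(
    index=vector_index,
    llm=Settings.llm,
    reranker=reranker,
    verbose=True,
)
query_engine = RetrieverQueryEngine.from_args(retriever=retriever)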
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-postgres')
get_ipython().system('pip install llama-index')
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.postgres import PGVectorStore
import textwrap
import openai
import os
os.environ["OPENAI_API_KEY"] = "<your key>"
openai.api_key = "<your key>"
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
print("Document ID:", documents[0].doc_id)
import psycopg2
connection_string = "postgresql://postgres:password@localhost:5432"
db_name = "vector_db"
conn = psycopg2.connect(connection_string)
conn.autocommit = True
with conn.cursor() as c:
c.execute(f"DROP DATABASE IF EXISTS {db_name}")
c.execute(f"CREATE DATABASE {db_name}")
from sqlalchemy import make_url
url = make_url(connection_string)
vector_store = PGVectorStore.from_params(
database=db_name,
host=url.host,
password=url.password,
port=url.port,
user=url.username,
table_name="paul_graham_essay",
embed_dim=1536, # openai embedding dimension
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, show_progress=True
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What happened in the mid 1980s?")
print(textwrap.fill(str(response), 100))
vector_store = PGVectorStore.from_params(
database="vector_db",
host="localhost",
password="password",
port=5432,
user="postgres",
table_name="paul_graham_essay",
embed_dim=1536, # openai embedding dimension
)
index = | VectorStoreIndex.from_vector_store(vector_store=vector_store) | llama_index.core.VectorStoreIndex.from_vector_store |
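# Minimal sketch (not in the source): the reloaded index serves queries
# exactly like the freshly built one above.
query_engine = index.as_query_engine()
print(textwrap.fill(str(query_engine.query("What did the author do?")), 100))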
get_ipython().run_line_magic('pip', 'install llama-index-program-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-llama-api')
get_ipython().system('pip install llama-index')
from llama_index.llms.llama_api import LlamaAPI
api_key = "LL-your-key"
llm = LlamaAPI(api_key=api_key)
resp = llm.complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = llm.chat(messages)
print(resp)
from pydantic import BaseModel
from llama_index.core.llms.openai_utils import to_openai_function
class Song(BaseModel):
"""A song with name and artist"""
name: str
artist: str
song_fn = to_openai_function(Song)
llm = LlamaAPI(api_key=api_key)
response = llm.complete("Generate a song", functions=[song_fn])
function_call = response.additional_kwargs["function_call"]
print(function_call)
from pydantic import BaseModel
from typing import List
class Song(BaseModel):
"""Data model for a song."""
title: str
length_mins: int
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
songs: List[Song]
from llama_index.program.openai import OpenAIPydanticProgram
prompt_template_str = """\
Extract album and songs from the text provided.
For each song, make sure to specify the title and the length_mins.
{text}
"""
llm = | LlamaAPI(api_key=api_key, temperature=0.0) | llama_index.llms.llama_api.LlamaAPI |
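# Hedged sketch: wiring the imported OpenAIPydanticProgram to the Album model.
# The from_defaults arguments are standard; pairing it with the LlamaAPI LLM
# and the sample input text are assumptions for illustration.
program = OpenAIPydanticProgram.from_defaults(
    output_cls=Album,
    llm=llm,
    prompt_template_str=prompt_template_str,
    verbose=True,
)
output = program(text="My Chemical Romance released The Black Parade...")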
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = | PyMuPDFReader() | llama_index.readers.file.PyMuPDFReader |
get_ipython().run_line_magic('pip', 'install llama-index llama-index-vector-stores-qdrant -q')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('mkdir data')
get_ipython().system('wget "https://arxiv.org/pdf/2402.09353.pdf" -O "./data/dorav1.pdf"')
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4")
response = llm.complete("What is DoRA?")
print(response.text)
"""Load the data.
With llama-index, before any transformations are applied,
data is loaded in the `Document` abstraction, which is
a container that holds the text of the document.
"""
from llama_index.core import SimpleDirectoryReader
loader = SimpleDirectoryReader(input_dir="./data")
documents = loader.load_data()
"""Chunk, Encode, and Store into a Vector Store.
To streamline the process, we can make use of the IngestionPipeline
class that will apply your specified transformations to the
Document's.
"""
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.qdrant import QdrantVectorStore
import qdrant_client
client = qdrant_client.QdrantClient(location=":memory:")
vector_store = QdrantVectorStore(client=client, collection_name="test_store")
pipeline = IngestionPipeline(
transformations=[
| SentenceSplitter() | llama_index.core.node_parser.SentenceSplitter |
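# Hedged completion sketch (the row above ends mid-call): a typical pipeline
# pairs the splitter with an embedding step and writes straight into the
# vector store.
pipeline = IngestionPipeline(
    transformations=[SentenceSplitter(), OpenAIEmbedding()],
    vector_store=vector_store,
)
nodes = pipeline.run(documents=documents)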
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-rankgpt-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-ollama')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import LLMRerank
from llama_index.llms.openai import OpenAI
from IPython.display import Markdown, display
import os
OPENAI_API_TOKEN = "sk-"
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
from llama_index.core import Settings
Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.chunk_size = 512
from pathlib import Path
import requests
wiki_titles = [
"Vincent van Gogh",
]
data_path = Path("data_wiki")
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
documents = SimpleDirectoryReader("./data_wiki/").load_data()
index = VectorStoreIndex.from_documents(
documents,
)
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core import QueryBundle
from llama_index.postprocessor.rankgpt_rerank import RankGPTRerank
import pandas as pd
from IPython.display import display, HTML
def get_retrieved_nodes(
query_str, vector_top_k=10, reranker_top_n=3, with_reranker=False
):
query_bundle = QueryBundle(query_str)
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=vector_top_k,
)
retrieved_nodes = retriever.retrieve(query_bundle)
if with_reranker:
reranker = RankGPTRerank(
llm=OpenAI(
model="gpt-3.5-turbo-16k",
temperature=0.0,
api_key=OPENAI_API_TOKEN,
),
top_n=reranker_top_n,
verbose=True,
)
retrieved_nodes = reranker.postprocess_nodes(
retrieved_nodes, query_bundle
)
return retrieved_nodes
def pretty_print(df):
return display(HTML(df.to_html().replace("\\n", "<br>")))
def visualize_retrieved_nodes(nodes) -> None:
result_dicts = []
for node in nodes:
result_dict = {"Score": node.score, "Text": node.node.get_text()}
result_dicts.append(result_dict)
pretty_print(pd.DataFrame(result_dicts))
new_nodes = get_retrieved_nodes(
"Which date did Paul Gauguin arrive in Arles?",
vector_top_k=3,
with_reranker=False,
)
visualize_retrieved_nodes(new_nodes)
new_nodes = get_retrieved_nodes(
"Which date did Paul Gauguin arrive in Arles ?",
vector_top_k=10,
reranker_top_n=3,
with_reranker=True,
)
visualize_retrieved_nodes(new_nodes)
from llama_index.llms.ollama import Ollama
llm = | Ollama(model="mistral", request_timeout=30.0) | llama_index.llms.ollama.Ollama |
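# Sketch (assumption): RankGPTRerank accepts any LLM, so the local Ollama
# model defined above can stand in for OpenAI in the reranker.
reranker = RankGPTRerank(llm=llm, top_n=3, verbose=True)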
get_ipython().system('pip install llama-index')
get_ipython().system('pip install wget')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-azureaisearch')
get_ipython().run_line_magic('pip', 'install azure-search-documents==11.4.0')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-azure-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-azure-openai')
import logging
import sys
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from IPython.display import Markdown, display
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.core.settings import Settings
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from llama_index.vector_stores.azureaisearch import AzureAISearchVectorStore
from llama_index.vector_stores.azureaisearch import (
IndexManagement,
MetadataIndexFieldType,
)
aoai_api_key = "YOUR_AZURE_OPENAI_API_KEY"
aoai_endpoint = "YOUR_AZURE_OPENAI_ENDPOINT"
aoai_api_version = "2023-05-15"
llm = AzureOpenAI(
model="YOUR_AZURE_OPENAI_COMPLETION_MODEL_NAME",
deployment_name="YOUR_AZURE_OPENAI_COMPLETION_DEPLOYMENT_NAME",
api_key=aoai_api_key,
azure_endpoint=aoai_endpoint,
api_version=aoai_api_version,
)
embed_model = AzureOpenAIEmbedding(
model="YOUR_AZURE_OPENAI_EMBEDDING_MODEL_NAME",
deployment_name="YOUR_AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME",
api_key=aoai_api_key,
azure_endpoint=aoai_endpoint,
api_version=aoai_api_version,
)
search_service_api_key = "YOUR-AZURE-SEARCH-SERVICE-ADMIN-KEY"
search_service_endpoint = "YOUR-AZURE-SEARCH-SERVICE-ENDPOINT"
search_service_api_version = "2023-11-01"
credential = AzureKeyCredential(search_service_api_key)
index_name = "llamaindex-vector-demo"
index_client = SearchIndexClient(
endpoint=search_service_endpoint,
credential=credential,
)
search_client = SearchClient(
endpoint=search_service_endpoint,
index_name=index_name,
credential=credential,
)
metadata_fields = {
"author": "author",
"theme": ("topic", MetadataIndexFieldType.STRING),
"director": "director",
}
vector_store = AzureAISearchVectorStore(
search_or_index_client=index_client,
filterable_metadata_field_keys=metadata_fields,
index_name=index_name,
index_management=IndexManagement.CREATE_IF_NOT_EXISTS,
id_field_key="id",
chunk_field_key="chunk",
embedding_field_key="embedding",
embedding_dimensionality=1536,
metadata_string_field_key="metadata",
doc_id_field_key="doc_id",
language_analyzer="en.lucene",
vector_algorithm_type="exhaustiveKnn",
)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("../data/paul_graham/").load_data()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
Settings.llm = llm
Settings.embed_model = embed_model
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine(similarity_top_k=3)
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
response = query_engine.query(
"What did the author learn?",
)
display(Markdown(f"<b>{response}</b>"))
index_name = "llamaindex-vector-demo"
metadata_fields = {
"author": "author",
"theme": ("topic", MetadataIndexFieldType.STRING),
"director": "director",
}
vector_store = AzureAISearchVectorStore(
search_or_index_client=search_client,
filterable_metadata_field_keys=metadata_fields,
index_management=IndexManagement.VALIDATE_INDEX,
id_field_key="id",
chunk_field_key="chunk",
embedding_field_key="embedding",
embedding_dimensionality=1536,
metadata_string_field_key="metadata",
doc_id_field_key="doc_id",
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
[],
storage_context=storage_context,
)
query_engine = index.as_query_engine()
response = query_engine.query("What was a hard moment for the author?")
display(Markdown(f"<b>{response}</b>"))
response = query_engine.query("Who is the author?")
display(Markdown(f"<b>{response}</b>"))
import time
query_engine = index.as_query_engine(streaming=True)
response = query_engine.query("What happened at interleaf?")
start_time = time.time()
token_count = 0
for token in response.response_gen:
print(token, end="")
token_count += 1
time_elapsed = time.time() - start_time
tokens_per_second = token_count / time_elapsed
print(f"\n\nStreamed output at {tokens_per_second} tokens/s")
response = query_engine.query("What colour is the sky?")
display(Markdown(f"<b>{response}</b>"))
from llama_index.core import Document
index.insert_nodes([Document(text="The sky is indigo today")])
response = query_engine.query("What colour is the sky?")
display(Markdown(f"<b>{response}</b>"))
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
},
),
]
index.insert_nodes(nodes)
from llama_index.core.vector_stores.types import (
MetadataFilters,
ExactMatchFilter,
)
filters = MetadataFilters(
filters=[ | ExactMatchFilter(key="theme", value="Mafia") | llama_index.core.vector_stores.types.ExactMatchFilter |
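# Hedged completion sketch (the call above is cut off mid-list): the filters
# plug into a retriever built from the index.
filters = MetadataFilters(
    filters=[ExactMatchFilter(key="theme", value="Mafia")]
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("What is inception about?")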
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.core import SummaryIndex
from llama_index.core.response.notebook_utils import display_response
from llama_index.llms.openai import OpenAI
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.core import Document
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
llm = OpenAI(model="gpt-4")
from llama_index.core.node_parser import SentenceSplitter
chunk_sizes = [128, 256, 512, 1024]
nodes_list = []
vector_indices = []
for chunk_size in chunk_sizes:
print(f"Chunk Size: {chunk_size}")
splitter = SentenceSplitter(chunk_size=chunk_size)
nodes = splitter.get_nodes_from_documents(docs)
for node in nodes:
node.metadata["chunk_size"] = chunk_size
node.excluded_embed_metadata_keys = ["chunk_size"]
node.excluded_llm_metadata_keys = ["chunk_size"]
nodes_list.append(nodes)
vector_index = VectorStoreIndex(nodes)
vector_indices.append(vector_index)
from llama_index.core.tools import RetrieverTool
from llama_index.core.schema import IndexNode
retriever_dict = {}
retriever_nodes = []
for chunk_size, vector_index in zip(chunk_sizes, vector_indices):
node_id = f"chunk_{chunk_size}"
node = IndexNode(
text=(
"Retrieves relevant context from the Llama 2 paper (chunk size"
f" {chunk_size})"
),
index_id=node_id,
)
retriever_nodes.append(node)
retriever_dict[node_id] = vector_index.as_retriever()
from llama_index.core.selectors import PydanticMultiSelector
from llama_index.core.retrievers import RouterRetriever
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(retriever_nodes)
retriever = RecursiveRetriever(
root_id="root",
retriever_dict={"root": summary_index.as_retriever(), **retriever_dict},
)
nodes = await retriever.aretrieve(
"Tell me about the main aspects of safety fine-tuning"
)
print(f"Number of nodes: {len(nodes)}")
for node in nodes:
print(node.node.metadata["chunk_size"])
print(node.node.get_text())
from llama_index.core.postprocessor import LLMRerank, SentenceTransformerRerank
from llama_index.postprocessor.cohere_rerank import CohereRerank
reranker = CohereRerank(top_n=10)
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine(retriever, node_postprocessors=[reranker])
response = query_engine.query(
"Tell me about the main aspects of safety fine-tuning"
)
display_response(
response, show_source=True, source_length=500, show_source_metadata=True
)
from collections import defaultdict
import pandas as pd
def mrr_all(metadata_values, metadata_key, source_nodes):
value_to_mrr_dict = {}
for metadata_value in metadata_values:
mrr = 0
for idx, source_node in enumerate(source_nodes):
if source_node.node.metadata[metadata_key] == metadata_value:
mrr = 1 / (idx + 1)
break
else:
continue
value_to_mrr_dict[metadata_value] = mrr
df = pd.DataFrame(value_to_mrr_dict, index=["MRR"])
df.style.set_caption("Mean Reciprocal Rank")
return df
print("Mean Reciprocal Rank for each Chunk Size")
mrr_all(chunk_sizes, "chunk_size", response.source_nodes)
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
import nest_asyncio
nest_asyncio.apply()
eval_llm = OpenAI(model="gpt-4")
dataset_generator = DatasetGenerator(
nodes_list[-1],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=2,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60)
eval_dataset.save_json("data/llama2_eval_qr_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
import asyncio
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
RelevancyEvaluator,
FaithfulnessEvaluator,
PairwiseComparisonEvaluator,
)
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = | SemanticSimilarityEvaluator(llm=eval_llm) | llama_index.core.evaluation.SemanticSimilarityEvaluator |
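# Illustrative sketch (not from the source): a single correctness evaluation.
# The reference string is a placeholder; in practice it comes from eval_dataset.
eval_result = await evaluator_c.aevaluate(
    query="Tell me about the main aspects of safety fine-tuning",
    response=str(response),
    reference="...",
)
print(eval_result.score, eval_result.feedback)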
import openai
openai.api_key = "sk-xxx"
from llama_index.agent.openai import OpenAIAgent
from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
agent = OpenAIAgent.from_tools(
| DuckDuckGoSearchToolSpec() | llama_index.tools.duckduckgo.DuckDuckGoSearchToolSpec |
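# Hedged completion sketch: tool specs expose to_tool_list(), as the Zapier
# snippet later in this file shows, so the agent construction likely reads:
agent = OpenAIAgent.from_tools(
    DuckDuckGoSearchToolSpec().to_tool_list(), verbose=True
)
print(agent.chat("What's the latest news about open-source LLMs?"))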
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
get_ipython().system('pip install "llama_index>=0.9.7"')
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.extractors import TitleExtractor, SummaryExtractor
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import MetadataMode
def build_pipeline():
llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1)
transformations = [
SentenceSplitter(chunk_size=1024, chunk_overlap=20),
TitleExtractor(
llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8
),
SummaryExtractor(
llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8
),
OpenAIEmbedding(),
]
return IngestionPipeline(transformations=transformations)
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
import time
times = []
for _ in range(3):
time.sleep(30) # help prevent rate-limits/timeouts, keeps each run fair
    pipeline = build_pipeline()
    start = time.time()
    nodes = await pipeline.arun(documents=documents)
end = time.time()
times.append(end - start)
print(f"Average time: {sum(times) / len(times)}")
get_ipython().system('pip install "llama_index<0.9.6"')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.extractors import TitleExtractor, SummaryExtractor
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import MetadataMode
def build_pipeline():
llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1)
transformations = [
SentenceSplitter(chunk_size=1024, chunk_overlap=20),
TitleExtractor(llm=llm, metadata_mode=MetadataMode.EMBED),
| SummaryExtractor(llm=llm, metadata_mode=MetadataMode.EMBED) | llama_index.core.extractors.SummaryExtractor |
get_ipython().system('pip install -q llama-index llama-index-vector-stores-mongodb llama-index-embeddings-fireworks==0.1.2 llama-index-llms-fireworks')
get_ipython().system('pip install -q pymongo datasets pandas')
import os
import getpass
fw_api_key = getpass.getpass("Fireworks API Key:")
os.environ["FIREWORKS_API_KEY"] = fw_api_key
from datasets import load_dataset
import pandas as pd
dataset = load_dataset("AIatMongoDB/whatscooking.restaurants")
dataset_df = pd.DataFrame(dataset["train"])
dataset_df.head(5)
from llama_index.core.settings import Settings
from llama_index.llms.fireworks import Fireworks
from llama_index.embeddings.fireworks import FireworksEmbedding
embed_model = FireworksEmbedding(
embed_batch_size=512,
model_name="nomic-ai/nomic-embed-text-v1.5",
api_key=fw_api_key,
)
llm = Fireworks(
temperature=0,
model="accounts/fireworks/models/mixtral-8x7b-instruct",
api_key=fw_api_key,
)
Settings.llm = llm
Settings.embed_model = embed_model
import json
from llama_index.core import Document
from llama_index.core.schema import MetadataMode
documents_json = dataset_df.to_json(orient="records")
documents_list = json.loads(documents_json)
llama_documents = []
for document in documents_list:
document["name"] = json.dumps(document["name"])
document["cuisine"] = json.dumps(document["cuisine"])
document["attributes"] = json.dumps(document["attributes"])
document["menu"] = json.dumps(document["menu"])
document["borough"] = json.dumps(document["borough"])
document["address"] = json.dumps(document["address"])
document["PriceRange"] = json.dumps(document["PriceRange"])
document["HappyHour"] = json.dumps(document["HappyHour"])
document["review_count"] = json.dumps(document["review_count"])
document["TakeOut"] = json.dumps(document["TakeOut"])
del document["embedding"]
del document["location"]
llama_document = Document(
text=json.dumps(document),
metadata=document,
metadata_template="{key}=>{value}",
text_template="Metadata: {metadata_str}\n-----\nContent: {content}",
)
llama_documents.append(llama_document)
print(
"\nThe LLM sees this: \n",
llama_documents[0].get_content(metadata_mode=MetadataMode.LLM),
)
print(
"\nThe Embedding model sees this: \n",
llama_documents[0].get_content(metadata_mode=MetadataMode.EMBED),
)
llama_documents[0]
from llama_index.core.node_parser import SentenceSplitter
parser = SentenceSplitter()
nodes = parser.get_nodes_from_documents(llama_documents)
new_nodes = nodes[:2500]
node_embeddings = embed_model(new_nodes)
for idx, n in enumerate(new_nodes):
n.embedding = node_embeddings[idx].embedding
if "_id" in n.metadata:
del n.metadata["_id"]
import pymongo
def get_mongo_client(mongo_uri):
"""Establish connection to the MongoDB."""
try:
client = pymongo.MongoClient(mongo_uri)
print("Connection to MongoDB successful")
return client
except pymongo.errors.ConnectionFailure as e:
print(f"Connection failed: {e}")
return None
import os
import getpass
mongo_uri = getpass.getpass("MONGO_URI:")
if not mongo_uri:
print("MONGO_URI not set")
mongo_client = get_mongo_client(mongo_uri)
DB_NAME = "whatscooking"
COLLECTION_NAME = "restaurants"
db = mongo_client[DB_NAME]
collection = db[COLLECTION_NAME]
collection.delete_many({})
from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
vector_store = MongoDBAtlasVectorSearch(
mongo_client,
db_name=DB_NAME,
collection_name=COLLECTION_NAME,
index_name="vector_index",
)
vector_store.add(new_nodes)
from llama_index.core import VectorStoreIndex, StorageContext
index = VectorStoreIndex.from_vector_store(vector_store)
get_ipython().run_line_magic('pip', 'install -q matplotlib')
import pprint
from llama_index.core.response.notebook_utils import display_response
query_engine = index.as_query_engine()
query = "search query: Anything that doesn't have alcohol in it"
response = query_engine.query(query)
| display_response(response) | llama_index.core.response.notebook_utils.display_response |
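# Sketch (assumption): inspect where the answer came from, putting the pprint
# import above to use.
pprint.pprint(response.source_nodes[0].node.metadata)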
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
get_ipython().system('pip -q install python-dotenv pinecone-client llama-index pymupdf')
dotenv_path = (
"env" # Google Colabs will not let you open a .env, but you can set
)
with open(dotenv_path, "w") as f:
f.write('PINECONE_API_KEY="<your api key>"\n')
f.write('PINECONE_ENVIRONMENT="gcp-starter"\n')
f.write('OPENAI_API_KEY="<your api key>"\n')
import os
from dotenv import load_dotenv
load_dotenv(dotenv_path=dotenv_path)
import pinecone
api_key = os.environ["PINECONE_API_KEY"]
environment = os.environ["PINECONE_ENVIRONMENT"]
pinecone.init(api_key=api_key, environment=environment)
index_name = "llamaindex-rag-fs"
pinecone.create_index(
index_name, dimension=1536, metric="euclidean", pod_type="p1"
)
pinecone_index = pinecone.Index(index_name)
pinecone_index.delete(deleteAll=True)
from llama_index.vector_stores.pinecone import PineconeVectorStore
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
import fitz
file_path = "./data/llama2.pdf"
doc = fitz.open(file_path)
from llama_index.core.node_parser import SentenceSplitter
text_parser = SentenceSplitter(
chunk_size=1024,
)
text_chunks = []
doc_idxs = []
for doc_idx, page in enumerate(doc):
page_text = page.get_text("text")
cur_text_chunks = text_parser.split_text(page_text)
text_chunks.extend(cur_text_chunks)
doc_idxs.extend([doc_idx] * len(cur_text_chunks))
from llama_index.core.schema import TextNode
nodes = []
for idx, text_chunk in enumerate(text_chunks):
node = TextNode(
text=text_chunk,
)
src_doc_idx = doc_idxs[idx]
src_page = doc[src_doc_idx]
nodes.append(node)
print(nodes[0].metadata)
print(nodes[0].get_content(metadata_mode="all"))
from llama_index.core.extractors import (
QuestionsAnsweredExtractor,
TitleExtractor,
)
from llama_index.core.ingestion import IngestionPipeline
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo")
extractors = [
| TitleExtractor(nodes=5, llm=llm) | llama_index.core.extractors.TitleExtractor |
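# Hedged completion sketch (the extractor list above is cut off): pairing the
# title extractor with the imported QuestionsAnsweredExtractor and running
# both over the nodes via an ingestion pipeline.
extractors = [
    TitleExtractor(nodes=5, llm=llm),
    QuestionsAnsweredExtractor(questions=3, llm=llm),
]
pipeline = IngestionPipeline(transformations=extractors)
nodes = pipeline.run(nodes=nodes)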
import openai
openai.api_key = "sk-you-key"
from llama_index.agent import OpenAIAgent
from llama_index.llms import OpenAI
from llama_index.tools.zapier.base import ZapierToolSpec
zapier_spec = ZapierToolSpec(api_key="sk-ak-your-key")
tools = zapier_spec.to_tool_list()
llm = | OpenAI(model="gpt-4-0613") | llama_index.llms.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.core import SummaryIndex
from llama_index.core.response.notebook_utils import display_response
from llama_index.llms.openai import OpenAI
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.core import Document
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
llm = OpenAI(model="gpt-4")
from llama_index.core.node_parser import SentenceSplitter
chunk_sizes = [128, 256, 512, 1024]
nodes_list = []
vector_indices = []
for chunk_size in chunk_sizes:
print(f"Chunk Size: {chunk_size}")
splitter = SentenceSplitter(chunk_size=chunk_size)
nodes = splitter.get_nodes_from_documents(docs)
for node in nodes:
node.metadata["chunk_size"] = chunk_size
node.excluded_embed_metadata_keys = ["chunk_size"]
node.excluded_llm_metadata_keys = ["chunk_size"]
nodes_list.append(nodes)
vector_index = VectorStoreIndex(nodes)
vector_indices.append(vector_index)
from llama_index.core.tools import RetrieverTool
from llama_index.core.schema import IndexNode
retriever_dict = {}
retriever_nodes = []
for chunk_size, vector_index in zip(chunk_sizes, vector_indices):
node_id = f"chunk_{chunk_size}"
node = IndexNode(
text=(
"Retrieves relevant context from the Llama 2 paper (chunk size"
f" {chunk_size})"
),
index_id=node_id,
)
retriever_nodes.append(node)
retriever_dict[node_id] = vector_index.as_retriever()
from llama_index.core.selectors import PydanticMultiSelector
from llama_index.core.retrievers import RouterRetriever
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(retriever_nodes)
retriever = RecursiveRetriever(
root_id="root",
retriever_dict={"root": summary_index.as_retriever(), **retriever_dict},
)
nodes = await retriever.aretrieve(
"Tell me about the main aspects of safety fine-tuning"
)
print(f"Number of nodes: {len(nodes)}")
for node in nodes:
print(node.node.metadata["chunk_size"])
print(node.node.get_text())
from llama_index.core.postprocessor import LLMRerank, SentenceTransformerRerank
from llama_index.postprocessor.cohere_rerank import CohereRerank
reranker = CohereRerank(top_n=10)
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine(retriever, node_postprocessors=[reranker])
response = query_engine.query(
"Tell me about the main aspects of safety fine-tuning"
)
display_response(
response, show_source=True, source_length=500, show_source_metadata=True
)
from collections import defaultdict
import pandas as pd
def mrr_all(metadata_values, metadata_key, source_nodes):
value_to_mrr_dict = {}
for metadata_value in metadata_values:
mrr = 0
for idx, source_node in enumerate(source_nodes):
if source_node.node.metadata[metadata_key] == metadata_value:
mrr = 1 / (idx + 1)
break
else:
continue
value_to_mrr_dict[metadata_value] = mrr
df = pd.DataFrame(value_to_mrr_dict, index=["MRR"])
df.style.set_caption("Mean Reciprocal Rank")
return df
print("Mean Reciprocal Rank for each Chunk Size")
mrr_all(chunk_sizes, "chunk_size", response.source_nodes)
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
import nest_asyncio
nest_asyncio.apply()
eval_llm = OpenAI(model="gpt-4")
dataset_generator = DatasetGenerator(
nodes_list[-1],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=2,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60)
eval_dataset.save_json("data/llama2_eval_qr_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
import asyncio
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
RelevancyEvaluator,
FaithfulnessEvaluator,
PairwiseComparisonEvaluator,
)
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm)
evaluator_r = RelevancyEvaluator(llm=eval_llm)
evaluator_f = FaithfulnessEvaluator(llm=eval_llm)
pairwise_evaluator = | PairwiseComparisonEvaluator(llm=eval_llm) | llama_index.core.evaluation.PairwiseComparisonEvaluator |
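# Illustrative sketch (not from the source): the pairwise evaluator compares
# two candidate answers to the same query; both arguments are plain strings,
# and the response is compared against itself here purely for illustration.
pairwise_result = await pairwise_evaluator.aevaluate(
    query="Tell me about the main aspects of safety fine-tuning",
    response=str(response),
    second_response=str(response),
)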
get_ipython().run_line_magic('pip', 'install llama-index-llms-mistralai')
get_ipython().system('pip install llama-index')
from llama_index.llms.mistralai import MistralAI
llm = MistralAI()
resp = llm.complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.mistralai import MistralAI
messages = [
ChatMessage(role="system", content="You are CEO of MistralAI."),
ChatMessage(role="user", content="Tell me the story about La plateforme"),
]
resp = | MistralAI() | llama_index.llms.mistralai.MistralAI |
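# Hedged completion sketch (the call above is cut off): chat with the messages.
resp = MistralAI().chat(messages)
print(resp)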
import openai
openai.api_key = "sk-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools.code_interpreter.base import CodeInterpreterToolSpec
code_spec = CodeInterpreterToolSpec()
tools = code_spec.to_tool_list()
agent = | OpenAIAgent.from_tools(tools, verbose=True) | llama_index.agent.OpenAIAgent.from_tools |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-docarray')
get_ipython().system('pip install llama-index')
import os
import sys
import logging
import textwrap
import warnings
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from llama_index.core import (
GPTVectorStoreIndex,
SimpleDirectoryReader,
Document,
)
from llama_index.vector_stores.docarray import DocArrayInMemoryVectorStore
from IPython.display import Markdown, display
import os
os.environ["OPENAI_API_KEY"] = "<your openai key>"
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(
"Document ID:",
documents[0].doc_id,
"Document Hash:",
documents[0].doc_hash,
)
from llama_index.core import StorageContext
vector_store = DocArrayInMemoryVectorStore()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = GPTVectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What was a hard moment for the author?")
print(textwrap.fill(str(response), 100))
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
},
),
]
from llama_index.core import StorageContext
vector_store = DocArrayInMemoryVectorStore()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = | GPTVectorStoreIndex(nodes, storage_context=storage_context) | llama_index.core.GPTVectorStoreIndex |
from llama_hub.semanticscholar.base import SemanticScholarReader
import os
import openai
from llama_index.llms import OpenAI
from llama_index.query_engine import CitationQueryEngine
from llama_index import (
VectorStoreIndex,
StorageContext,
load_index_from_storage,
ServiceContext,
)
from llama_index.response.notebook_utils import display_response
s2reader = SemanticScholarReader()
openai.api_key = os.environ["OPENAI_API_KEY"]
service_context = ServiceContext.from_defaults(
llm=OpenAI(model="gpt-3.5-turbo", temperature=0)
)
query_space = "large language models"
full_text = True
total_papers = 50
persist_dir = (
"./citation_" + query_space + "_" + str(total_papers) + "_" + str(full_text)
)
if not os.path.exists(persist_dir):
documents = s2reader.load_data(query_space, total_papers, full_text=full_text)
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
index.storage_context.persist(persist_dir=persist_dir)
else:
index = load_index_from_storage(
| StorageContext.from_defaults(persist_dir=persist_dir) | llama_index.StorageContext.from_defaults |
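# Sketch (assumption): the CitationQueryEngine imported above wraps the index
# so answers carry inline citations back to the retrieved papers.
query_engine = CitationQueryEngine.from_args(index, similarity_top_k=3)
response = query_engine.query("What are the limitations of large language models?")
display_response(response, show_source=True)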
get_ipython().run_line_magic('pip', 'install llama-index-llms-gradient')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().system('pip install llama-index gradientai -q')
import os
from llama_index.llms.gradient import GradientBaseModelLLM
from llama_index.finetuning import GradientFinetuneEngine
os.environ["GRADIENT_ACCESS_TOKEN"] = os.getenv("GRADIENT_API_KEY")
os.environ["GRADIENT_WORKSPACE_ID"] = "<insert_workspace_id>"
from pydantic import BaseModel
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler
from llama_index.llms.openai import OpenAI
from llama_index.llms.gradient import GradientBaseModelLLM
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
openai_handler = LlamaDebugHandler()
openai_callback = CallbackManager([openai_handler])
openai_llm = OpenAI(model="gpt-4", callback_manager=openai_callback)
gradient_handler = LlamaDebugHandler()
gradient_callback = CallbackManager([gradient_handler])
base_model_slug = "llama2-7b-chat"
gradient_llm = GradientBaseModelLLM(
base_model_slug=base_model_slug,
max_tokens=300,
callback_manager=gradient_callback,
is_chat_model=True,
)
from llama_index.core.llms import LLMMetadata
prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
"""
openai_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Album),
prompt_template_str=prompt_template_str,
llm=openai_llm,
verbose=True,
)
gradient_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Album),
prompt_template_str=prompt_template_str,
llm=gradient_llm,
verbose=True,
)
response = openai_program(movie_name="The Shining")
print(str(response))
tmp = openai_handler.get_llm_inputs_outputs()
print(tmp[0][0].payload["messages"][0])
response = gradient_program(movie_name="The Shining")
print(str(response))
tmp = gradient_handler.get_llm_inputs_outputs()
print(tmp[0][0].payload["messages"][0])
from llama_index.core.program import LLMTextCompletionProgram
from pydantic import BaseModel
from llama_index.llms.openai import OpenAI
from llama_index.core.callbacks import GradientAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from llama_index.core.output_parsers import PydanticOutputParser
from typing import List
class Song(BaseModel):
"""Data model for a song."""
title: str
length_seconds: int
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
songs: List[Song]
finetuning_handler = GradientAIFineTuningHandler()
callback_manager = CallbackManager([finetuning_handler])
llm_gpt4 = OpenAI(model="gpt-4", callback_manager=callback_manager)
prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
"""
openai_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Album),
prompt_template_str=prompt_template_str,
llm=llm_gpt4,
verbose=True,
)
movie_names = [
"The Shining",
"The Departed",
"Titanic",
"Goodfellas",
"Pretty Woman",
"Home Alone",
"Caged Fury",
"Edward Scissorhands",
"Total Recall",
"Ghost",
"Tremors",
"RoboCop",
"Rocky V",
]
from tqdm.notebook import tqdm
for movie_name in tqdm(movie_names):
output = openai_program(movie_name=movie_name)
print(output.json())
events = finetuning_handler.get_finetuning_events()
events
finetuning_handler.save_finetuning_events("mock_finetune_songs.jsonl")
get_ipython().system('cat mock_finetune_songs.jsonl')
base_model_slug = "llama2-7b-chat"
base_llm = GradientBaseModelLLM(
base_model_slug=base_model_slug, max_tokens=500, is_chat_model=True
)
from llama_index.finetuning import GradientFinetuneEngine
finetune_engine = GradientFinetuneEngine(
base_model_slug=base_model_slug,
name="movies_structured",
data_path="mock_finetune_songs.jsonl",
verbose=True,
max_steps=200,
batch_size=1,
)
finetune_engine.model_adapter_id
epochs = 2
for i in range(epochs):
print(f"** EPOCH {i} **")
finetune_engine.finetune()
ft_llm = finetune_engine.get_finetuned_model(
max_tokens=500, is_chat_model=True
)
from llama_index.llms.gradient import GradientModelAdapterLLM
new_prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
Please only generate one album.
"""
gradient_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Album),
prompt_template_str=new_prompt_template_str,
llm=ft_llm,
verbose=True,
)
gradient_program(movie_name="Goodfellas")
gradient_program(movie_name="Chucky")
base_gradient_program = LLMTextCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Album),
prompt_template_str=prompt_template_str,
llm=base_llm,
verbose=True,
)
base_gradient_program(movie_name="Goodfellas")
get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pydantic import Field
from typing import List
class Citation(BaseModel):
"""Citation class."""
    author: str = Field(
        ..., description="Inferred first author (usually last name)"
    )
year: int = Field(..., description="Inferred year")
desc: str = Field(
...,
description=(
"Inferred description from the text of the work that the author is"
" cited for"
),
)
class Response(BaseModel):
"""List of author citations.
Extracted over unstructured text.
"""
citations: List[Citation] = Field(
...,
description=(
"List of author citations (organized by author, year, and"
" description)."
),
)
from llama_index.readers.file import PyMuPDFReader
from llama_index.core import Document
from llama_index.core.node_parser import SimpleNodeParser
from pathlib import Path
from llama_index.core.callbacks import GradientAIFineTuningHandler
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
metadata = {
"paper_title": "Llama 2: Open Foundation and Fine-Tuned Chat Models"
}
docs = [Document(text=doc_text, metadata=metadata)]
chunk_size = 1024
node_parser = | SimpleNodeParser.from_defaults(chunk_size=chunk_size) | llama_index.core.node_parser.SimpleNodeParser.from_defaults |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
TABLE_NAME = os.environ["DYNAMODB_TABLE_NAME"]
from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore
from llama_index.storage.index_store.dynamodb import DynamoDBIndexStore
from llama_index.vector_stores.dynamodb import DynamoDBVectorStore
storage_context = StorageContext.from_defaults(
docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME),
index_store= | DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME) | llama_index.storage.index_store.dynamodb.DynamoDBIndexStore.from_table_name |
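# Hedged completion sketch (the call above is cut off mid-argument-list): all
# three DynamoDB-backed stores can share the same table.
storage_context = StorageContext.from_defaults(
    docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME),
    index_store=DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME),
    vector_store=DynamoDBVectorStore.from_table_name(table_name=TABLE_NAME),
)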
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
get_ipython().system("mkdir -p 'data/'")
get_ipython().system("curl 'https://arxiv.org/pdf/2307.09288.pdf' -o 'data/llama2.pdf'")
get_ipython().system('pip install unstructured[pdf]')
from llama_index.core import VectorStoreIndex
from llama_index.readers.file import UnstructuredReader
documents = UnstructuredReader().load_data("data/llama2.pdf")
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
from llama_index.core.llama_pack import download_llama_pack
FuzzyCitationEnginePack = | download_llama_pack("FuzzyCitationEnginePack", "./fuzzy_pack") | llama_index.core.llama_pack.download_llama_pack |
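# Hedged sketch (pack usage is an assumption based on its README): the
# downloaded pack wraps the query engine and is invoked with run().
fuzzy_engine_pack = FuzzyCitationEnginePack(query_engine)
response = fuzzy_engine_pack.run("How was Llama 2 pretrained?")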
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import BaseTool, FunctionTool
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b
multiply_tool = | FunctionTool.from_defaults(fn=multiply) | llama_index.core.tools.FunctionTool.from_defaults |
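# Sketch (assumption): give the tool to an agent so the LLM can call it.
agent = OpenAIAgent.from_tools(
    [multiply_tool], llm=OpenAI(model="gpt-3.5-turbo"), verbose=True
)
print(agent.chat("What is 121 * 3?"))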
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
get_ipython().system('pip install llama_hub')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
docs0 = PyMuPDFReader().load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
index = VectorStoreIndex(base_nodes)
query_engine = index.as_query_engine(similarity_top_k=2)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/fh9vsmmm8vu0j50l3ss38/llama2_eval_qr_dataset.json?rlkey=kkoaez7aqeb4z25gzc06ak6kb&dl=1" -O data/llama2_eval_qr_dataset.json')
from llama_index.core.evaluation import QueryResponseDataset
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
from llama_index.core.evaluation.eval_utils import get_responses
from llama_index.core.evaluation import CorrectnessEvaluator, BatchEvalRunner
evaluator_c = | CorrectnessEvaluator() | llama_index.core.evaluation.CorrectnessEvaluator |
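# Hedged sketch: batch-evaluate the query engine on the dataset with the
# imports above; `questions` and `qr_pairs` are QueryResponseDataset fields.
pred_responses = get_responses(
    eval_dataset.questions, query_engine, show_progress=True
)
batch_runner = BatchEvalRunner({"correctness": evaluator_c}, workers=8)
eval_results = await batch_runner.aevaluate_responses(
    queries=eval_dataset.questions,
    responses=pred_responses,
    reference=[r for (_, r) in eval_dataset.qr_pairs],
)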
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index import SimpleDirectoryReader, VectorStoreIndex
import requests
response = requests.get(
"https://www.dropbox.com/s/f6bmb19xdg0xedm/paul_graham_essay.txt?dl=1"
)
essay_txt = response.text
with open("pg_essay.txt", "w") as fp:
fp.write(essay_txt)
documents = SimpleDirectoryReader(input_files=["pg_essay.txt"]).load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
query_engine_tool = QueryEngineTool(
query_engine=query_engine,
metadata=ToolMetadata(
name="paul_graham",
description=(
"Provides a biography of Paul Graham, from childhood to college to adult"
" life"
),
),
)
from llama_index.tools.text_to_image.base import TextToImageToolSpec
from llama_index.llms import OpenAI
llm = | OpenAI(model="gpt-4") | llama_index.llms.OpenAI |
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.postprocessor import LLMRerank
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core import Settings
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.packs.koda_retriever import KodaRetriever
import os
from pinecone import Pinecone
pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY"))
index = pc.Index("sample-movies")
Settings.llm = OpenAI()
Settings.embed_model = | OpenAIEmbedding() | llama_index.embeddings.openai.OpenAIEmbedding |
get_ipython().system('pip install llama-index')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.core.query_engine import TransformQueryEngine
from IPython.display import Markdown, display
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_str = "what did paul graham do after going to RISD"
query_engine = index.as_query_engine()
response = query_engine.query(query_str)
display(Markdown(f"<b>{response}</b>"))
hyde = HyDEQueryTransform(include_original=True)
hyde_query_engine = TransformQueryEngine(query_engine, hyde)
response = hyde_query_engine.query(query_str)
display(Markdown(f"<b>{response}</b>"))
query_bundle = hyde(query_str)
hyde_doc = query_bundle.embedding_strs[0]
hyde_doc
query_str = "What is Bel?"
response = query_engine.query(query_str)
display(Markdown(f"<b>{response}</b>"))
hyde = HyDEQueryTransform(include_original=True)
hyde_query_engine = | TransformQueryEngine(query_engine, hyde) | llama_index.core.query_engine.TransformQueryEngine |
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic')
get_ipython().system('pip install llama-index')
from llama_index.llms.anthropic import Anthropic
from llama_index.core import Settings
tokenizer = Anthropic().tokenizer
Settings.tokenizer = tokenizer
import os
os.environ["ANTHROPIC_API_KEY"] = "YOUR ANTHROPIC API KEY"
from llama_index.llms.anthropic import Anthropic
llm = Anthropic(model="claude-3-opus-20240229")
resp = llm.complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.anthropic import Anthropic
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="Tell me a story"),
]
resp = | Anthropic(model="claude-3-opus-20240229") | llama_index.llms.anthropic.Anthropic |
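# Illustrative sketch (not from the source): the same client also streams.
llm = Anthropic(model="claude-3-opus-20240229")
for r in llm.stream_complete("Paul Graham is "):
    print(r.delta, end="")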
get_ipython().run_line_magic('pip', 'install llama-index-readers-psychic')
get_ipython().system('pip install llama-index')
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SummaryIndex
from llama_index.readers.psychic import PsychicReader
from IPython.display import Markdown, display
psychic_key = "PSYCHIC_API_KEY"
account_id = "ACCOUNT_ID"
connector_id = "notion"
documents = | PsychicReader(psychic_key=psychic_key) | llama_index.readers.psychic.PsychicReader |
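# Hedged completion sketch (the call above is cut off): PsychicReader's
# load_data takes the connector and account IDs defined above.
documents = PsychicReader(psychic_key=psychic_key).load_data(
    connector_id=connector_id, account_id=account_id
)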
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from IPython.display import Markdown, display
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents, chunk_size=512)
from llama_index.core.output_parsers import LangchainOutputParser
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
response_schemas = [
ResponseSchema(
name="Education",
description=(
"Describes the author's educational experience/background."
),
),
ResponseSchema(
name="Work",
description="Describes the author's work experience/background.",
),
]
lc_output_parser = StructuredOutputParser.from_response_schemas(
response_schemas
)
output_parser = LangchainOutputParser(lc_output_parser)
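# Wiring the parser into a query: a minimal sketch that attaches the
# LangChain-backed output parser to an OpenAI LLM, so responses are formatted
# against the response schemas above (the question is illustrative):
from llama_index.llms.openai import OpenAI
llm = OpenAI(output_parser=output_parser)
query_engine = index.as_query_engine(llm=llm)
response = query_engine.query(
    "What are a few things the author did growing up?"
)
print(response)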
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
import os
os.environ[
"PINECONE_API_KEY"
] = "<Your Pinecone API key, from app.pinecone.io>"
from pinecone import Pinecone
from pinecone import ServerlessSpec
api_key = os.environ["PINECONE_API_KEY"]
pc = Pinecone(api_key=api_key)
try:
    pc.create_index(
        "quickstart-index",
        dimension=1536,
        metric="euclidean",
        spec=ServerlessSpec(cloud="aws", region="us-west-2"),
    )
except Exception as e:
    # The index may already exist; surface the error and continue
    print(e)
pinecone_index = pc.Index("quickstart-index")
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"author": "Harper Lee",
"theme": "Fiction",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index,
namespace="test",
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.retrievers import VectorIndexAutoRetriever
from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
vector_store_info = VectorStoreInfo(
content_info="famous books and movies",
metadata_info=[
MetadataInfo(
name="director",
type="str",
description=("Name of the director"),
),
MetadataInfo(
name="theme",
type="str",
description=("Theme of the book/movie"),
),
MetadataInfo(
name="year",
type="int",
description=("Year of the book/movie"),
),
],
)
retriever = VectorIndexAutoRetriever(
index,
vector_store_info=vector_store_info,
empty_query_top_k=10,
default_empty_query_vector=[0] * 1536,
verbose=True,
)
nodes = retriever.retrieve(
"Tell me about some books/movies after the year 2000"
)
for node in nodes:
print(node.text)
print(node.metadata)
nodes = retriever.retrieve("Tell me about some books that are Fiction")
for node in nodes:
print(node.text)
print(node.metadata)
from llama_index.core.vector_stores import MetadataFilters
filter_dicts = [{"key": "year", "operator": "==", "value": 1997}]
filters = MetadataFilters.from_dicts(filter_dicts)
retriever2 = VectorIndexAutoRetriever(
index,
vector_store_info=vector_store_info,
empty_query_top_k=10,
default_empty_query_vector=[0] * 1536,
extra_filters=filters,
)
nodes = retriever2.retrieve("Tell me about some books that are Fiction")
for node in nodes:
print(node.text)
print(node.metadata)
nodes = retriever.retrieve("Tell me about some books that are mafia-themed")
for node in nodes:
print(node.text)
print(node.metadata)
from llama_index.core.prompts import display_prompt_dict
from llama_index.core import PromptTemplate
prompts_dict = retriever.get_prompts()
display_prompt_dict(prompts_dict)
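# PromptTemplate (imported above) can be used to swap in a customized
# auto-retrieval prompt; a hedged sketch, assuming the prompt key printed by
# display_prompt_dict is "prompt" (here it round-trips the default template
# text purely to show the mechanism):
prompt_tmpl = PromptTemplate(prompts_dict["prompt"].get_template())
retriever.update_prompts({"prompt": prompt_tmpl})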
get_ipython().run_line_magic('pip', 'install llama-index-llms-monsterapi')
get_ipython().system('python3 -m pip install llama-index --quiet')
get_ipython().system('python3 -m pip install monsterapi --quiet')
get_ipython().system('python3 -m pip install sentence_transformers --quiet')
import os
from llama_index.llms.monsterapi import MonsterLLM
from llama_index.core.embeddings import resolve_embed_model
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
os.environ["MONSTER_API_KEY"] = ""
model = "llama2-7b-chat"
llm = MonsterLLM(model=model, temperature=0.75)
result = llm.complete("Who are you?")
print(result)
from llama_index.core.llms import ChatMessage
history_message = ChatMessage(
**{
"role": "user",
"content": (
"When asked 'who are you?' respond as 'I am qblocks llm model'"
" everytime."
),
}
)
current_message = ChatMessage(**{"role": "user", "content": "Who are you?"})
response = llm.chat([history_message, current_message])
print(response)
get_ipython().system('python3 -m pip install pypdf --quiet')
get_ipython().system('rm -r ./data')
get_ipython().system('mkdir -p data&&cd data&&curl \'https://arxiv.org/pdf/2005.11401.pdf\' -o "RAG.pdf"')
documents = SimpleDirectoryReader("./data").load_data()
llm = MonsterLLM(model=model, temperature=0.75, context_window=1024)
embed_model = resolve_embed_model("local:BAAI/bge-small-en-v1.5")
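# Putting the pieces together: a minimal RAG sketch over the downloaded PDF,
# using the local BGE embeddings and the MonsterAPI LLM; chunk sizes are
# kept small as an assumption to fit the 1024-token context window, and the
# question is illustrative:
splitter = SentenceSplitter(chunk_size=256, chunk_overlap=16)
index = VectorStoreIndex.from_documents(
    documents, transformations=[splitter], embed_model=embed_model
)
query_engine = index.as_query_engine(llm=llm)
response = query_engine.query("What is Retrieval-Augmented Generation?")
print(response)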
get_ipython().system('pip install llama_index')
get_ipython().system('pip install llama_hub')
get_ipython().system('pip install torch_geometric')
import os
from pprint import pprint
from llama_index import (
ServiceContext,
VectorStoreIndex,
SummaryIndex,
)
import llama_hub.docstring_walker as docstring_walker
walker = docstring_walker.DocstringWalker()
path_to_docstring_walker = os.path.dirname(docstring_walker.__file__)
example1_docs = walker.load_data(path_to_docstring_walker)
print(example1_docs[0].text)
example1_index = VectorStoreIndex(example1_docs)
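# Querying the docstring index: a minimal sketch (the question text is
# illustrative):
query_engine = example1_index.as_query_engine()
response = query_engine.query("What does the DocstringWalker do?")
print(response)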