prompt
stringlengths 70
19.8k
| completion
stringlengths 8
1.03k
| api
stringlengths 23
93
|
---|---|---|
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
SimpleKeywordTableIndex,
)
from llama_index.core import SummaryIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
llm = OpenAI(model="gpt-4")
splitter = | SentenceSplitter(chunk_size=1024) | llama_index.core.node_parser.SentenceSplitter |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().system('pip install llama-index')
import pinecone
import os
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="us-west4-gcp-free")
import os
import getpass
import openai
openai.api_key = "sk-<your-key>"
try:
pinecone.create_index(
"quickstart-index", dimension=1536, metric="euclidean", pod_type="p1"
)
except Exception:
pass
pinecone_index = pinecone.Index("quickstart-index")
pinecone_index.delete(deleteAll=True, namespace="test")
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text=(
"Michael Jordan is a retired professional basketball player,"
" widely regarded as one of the greatest basketball players of all"
" time."
),
metadata={
"category": "Sports",
"country": "United States",
"gender": "male",
"born": 1963,
},
),
TextNode(
text=(
"Angelina Jolie is an American actress, filmmaker, and"
" humanitarian. She has received numerous awards for her acting"
" and is known for her philanthropic work."
),
metadata={
"category": "Entertainment",
"country": "United States",
"gender": "female",
"born": 1975,
},
),
TextNode(
text=(
"Elon Musk is a business magnate, industrial designer, and"
" engineer. He is the founder, CEO, and lead designer of SpaceX,"
" Tesla, Inc., Neuralink, and The Boring Company."
),
metadata={
"category": "Business",
"country": "United States",
"gender": "male",
"born": 1971,
},
),
TextNode(
text=(
"Rihanna is a Barbadian singer, actress, and businesswoman. She"
" has achieved significant success in the music industry and is"
" known for her versatile musical style."
),
metadata={
"category": "Music",
"country": "Barbados",
"gender": "female",
"born": 1988,
},
),
TextNode(
text=(
"Cristiano Ronaldo is a Portuguese professional footballer who is"
" considered one of the greatest football players of all time. He"
" has won numerous awards and set multiple records during his"
" career."
),
metadata={
"category": "Sports",
"country": "Portugal",
"gender": "male",
"born": 1985,
},
),
]
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index, namespace="test"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.tools import FunctionTool
from llama_index.core.vector_stores import (
VectorStoreInfo,
MetadataInfo,
MetadataFilter,
MetadataFilters,
FilterCondition,
FilterOperator,
)
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from typing import List, Tuple, Any
from pydantic import BaseModel, Field
top_k = 3
vector_store_info = VectorStoreInfo(
content_info="brief biography of celebrities",
metadata_info=[
MetadataInfo(
name="category",
type="str",
description=(
"Category of the celebrity, one of [Sports, Entertainment,"
" Business, Music]"
),
),
MetadataInfo(
name="country",
type="str",
description=(
"Country of the celebrity, one of [United States, Barbados,"
" Portugal]"
),
),
MetadataInfo(
name="gender",
type="str",
description=("Gender of the celebrity, one of [male, female]"),
),
MetadataInfo(
name="born",
type="int",
description=("Born year of the celebrity, could be any integer"),
),
],
)
class AutoRetrieveModel(BaseModel):
query: str = Field(..., description="natural language query string")
filter_key_list: List[str] = Field(
..., description="List of metadata filter field names"
)
filter_value_list: List[Any] = Field(
...,
description=(
"List of metadata filter field values (corresponding to names"
" specified in filter_key_list)"
),
)
filter_operator_list: List[str] = Field(
...,
description=(
"Metadata filters conditions (could be one of <, <=, >, >=, ==, !=)"
),
)
filter_condition: str = Field(
...,
description=("Metadata filters condition values (could be AND or OR)"),
)
description = f"""\
Use this tool to look up biographical information about celebrities.
The vector database schema is given below:
{vector_store_info.json()}
"""
def auto_retrieve_fn(
query: str,
filter_key_list: List[str],
filter_value_list: List[any],
filter_operator_list: List[str],
filter_condition: str,
):
"""Auto retrieval function.
Performs auto-retrieval from a vector database, and then applies a set of filters.
"""
query = query or "Query"
metadata_filters = [
MetadataFilter(key=k, value=v, operator=op)
for k, v, op in zip(
filter_key_list, filter_value_list, filter_operator_list
)
]
retriever = VectorIndexRetriever(
index,
filters=MetadataFilters(
filters=metadata_filters, condition=filter_condition
),
top_k=top_k,
)
query_engine = RetrieverQueryEngine.from_args(retriever)
response = query_engine.query(query)
return str(response)
auto_retrieve_tool = FunctionTool.from_defaults(
fn=auto_retrieve_fn,
name="celebrity_bios",
description=description,
fn_schema=AutoRetrieveModel,
)
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
agent = OpenAIAgent.from_tools(
[auto_retrieve_tool],
llm=OpenAI(temperature=0, model="gpt-4-0613"),
verbose=True,
)
response = agent.chat("Tell me about two celebrities from the United States. ")
print(str(response))
response = agent.chat("Tell me about two celebrities born after 1980. ")
print(str(response))
response = agent.chat(
"Tell me about few celebrities under category business and born after 1950. "
)
print(str(response))
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
from llama_index.core import SQLDatabase
from llama_index.core.indices import SQLStructStoreIndex
engine = create_engine("sqlite:///:memory:", future=True)
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
metadata_obj.tables.keys()
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{"city_name": "Berlin", "population": 3645000, "country": "Germany"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
with engine.connect() as connection:
cursor = connection.exec_driver_sql("SELECT * FROM city_stats")
print(cursor.fetchall())
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
from llama_index.core.query_engine import NLSQLTableQueryEngine
query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["city_stats"],
)
get_ipython().system('pip install wikipedia')
from llama_index.readers.wikipedia import WikipediaReader
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
cities = ["Toronto", "Berlin", "Tokyo"]
wiki_docs = WikipediaReader().load_data(pages=cities)
import pinecone
import os
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="us-west1-gcp")
pinecone_index = pinecone.Index("quickstart")
pinecone_index.delete(deleteAll=True)
from llama_index.core import Settings
from llama_index.core import StorageContext
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.llms.openai import OpenAI
Settings.llm = OpenAI(temperature=0, model="gpt-4")
Settings.node_parser = TokenTextSplitter(chunk_size=1024)
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index, namespace="wiki_cities"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
vector_index = VectorStoreIndex([], storage_context=storage_context)
for city, wiki_doc in zip(cities, wiki_docs):
nodes = Settings.node_parser.get_nodes_from_documents([wiki_doc])
for node in nodes:
node.metadata = {"title": city}
vector_index.insert_nodes(nodes)
from llama_index.core.retrievers import VectorIndexAutoRetriever
from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.tools import QueryEngineTool
vector_store_info = VectorStoreInfo(
content_info="articles about different cities",
metadata_info=[
MetadataInfo(
name="title", type="str", description="The name of the city"
),
],
)
vector_auto_retriever = VectorIndexAutoRetriever(
vector_index, vector_store_info=vector_store_info
)
retriever_query_engine = RetrieverQueryEngine.from_args(
vector_auto_retriever,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query over"
" a table containing: city_stats, containing the population/country of"
" each city"
),
)
vector_tool = QueryEngineTool.from_defaults(
query_engine=retriever_query_engine,
name="vector_tool",
description=(
f"Useful for answering semantic questions about different cities"
),
)
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
agent = OpenAIAgent.from_tools(
[sql_tool, vector_tool],
llm= | OpenAI(temperature=0, model="gpt-4-0613") | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-faiss')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import faiss
d = 1536
faiss_index = faiss.IndexFlatL2(d)
from llama_index.core import (
SimpleDirectoryReader,
load_index_from_storage,
VectorStoreIndex,
StorageContext,
)
from llama_index.vector_stores.faiss import FaissVectorStore
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = FaissVectorStore(faiss_index=faiss_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
index.storage_context.persist()
vector_store = | FaissVectorStore.from_persist_dir("./storage") | llama_index.vector_stores.faiss.FaissVectorStore.from_persist_dir |
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import TimeWeightedPostprocessor
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.core.response.notebook_utils import display_response
from datetime import datetime, timedelta
from llama_index.core import StorageContext
now = datetime.now()
key = "__last_accessed__"
doc1 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v1.txt"]
).load_data()[0]
doc2 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v2.txt"]
).load_data()[0]
doc3 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v3.txt"]
).load_data()[0]
from llama_index.core import Settings
Settings.text_splitter = SentenceSplitter(chunk_size=512)
nodes1 = Settings.text_splitter.get_nodes_from_documents([doc1])
nodes2 = Settings.text_splitter.get_nodes_from_documents([doc2])
nodes3 = Settings.text_splitter.get_nodes_from_documents([doc3])
nodes1[14].metadata[key] = (now - timedelta(hours=3)).timestamp()
nodes1[14].excluded_llm_metadata_keys = [key]
nodes2[14].metadata[key] = (now - timedelta(hours=2)).timestamp()
nodes2[14].excluded_llm_metadata_keys = [key]
nodes3[14].metadata[key] = (now - timedelta(hours=1)).timestamp()
nodes2[14].excluded_llm_metadata_keys = [key]
docstore = SimpleDocumentStore()
nodes = [nodes1[14], nodes2[14], nodes3[14]]
docstore.add_documents(nodes)
storage_context = StorageContext.from_defaults(docstore=docstore)
index = VectorStoreIndex(nodes, storage_context=storage_context)
node_postprocessor = TimeWeightedPostprocessor(
time_decay=0.5, time_access_refresh=False, top_k=1
)
query_engine = index.as_query_engine(
similarity_top_k=3,
)
response = query_engine.query(
"How much did the author raise in seed funding from Idelle's husband"
" (Julian) for Viaweb?",
)
display_response(response)
query_engine = index.as_query_engine(
similarity_top_k=3, node_postprocessors=[node_postprocessor]
)
response = query_engine.query(
"How much did the author raise in seed funding from Idelle's husband"
" (Julian) for Viaweb?",
)
display_response(response)
from llama_index.core import SummaryIndex
query_str = (
"How much did the author raise in seed funding from Idelle's husband"
" (Julian) for Viaweb?"
)
query_engine = index.as_query_engine(
similarity_top_k=3, response_mode="no_text"
)
init_response = query_engine.query(
query_str,
)
resp_nodes = [n for n in init_response.source_nodes]
new_resp_nodes = node_postprocessor.postprocess_nodes(resp_nodes)
summary_index = | SummaryIndex([n.node for n in new_resp_nodes]) | llama_index.core.SummaryIndex |
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install llama-index-callbacks-uptrain')
get_ipython().run_line_magic('pip', 'install -q html2text llama-index pandas tqdm uptrain torch sentence-transformers')
from llama_index.core import Settings, VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.readers.web import SimpleWebPageReader
from llama_index.core.callbacks import CallbackManager
from llama_index.callbacks.uptrain.base import UpTrainCallbackHandler
from llama_index.core.query_engine import SubQuestionQueryEngine
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.postprocessor import SentenceTransformerRerank
from llama_index.llms.openai import OpenAI
import os
os.environ[
"OPENAI_API_KEY"
] = "sk-************" # Replace with your OpenAI API key
callback_handler = UpTrainCallbackHandler(
key_type="openai",
api_key=os.environ["OPENAI_API_KEY"],
project_name_prefix="llama",
)
Settings.callback_manager = CallbackManager([callback_handler])
documents = SimpleWebPageReader().load_data(
[
"https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt"
]
)
parser = | SentenceSplitter() | llama_index.core.node_parser.SentenceSplitter |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pinecone
import os
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="us-west1-gcp")
pinecone.create_index(
"quickstart", dimension=1536, metric="euclidean", pod_type="p1"
)
pinecone_index = pinecone.Index("quickstart")
pinecone_index.delete(deleteAll=True)
from llama_index.vector_stores.pinecone import PineconeVectorStore
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import StorageContext
splitter = | SentenceSplitter(chunk_size=1024) | llama_index.core.node_parser.SentenceSplitter |
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SQLDatabase
from llama_index.readers.wikipedia import WikipediaReader
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///:memory:", future=True)
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
metadata_obj.tables.keys()
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{"city_name": "Berlin", "population": 3645000, "country": "Germany"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
with engine.connect() as connection:
cursor = connection.exec_driver_sql("SELECT * FROM city_stats")
print(cursor.fetchall())
get_ipython().system('pip install wikipedia')
cities = ["Toronto", "Berlin", "Tokyo"]
wiki_docs = WikipediaReader().load_data(pages=cities)
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
from llama_index.core.query_engine import NLSQLTableQueryEngine
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["city_stats"],
)
vector_indices = []
for wiki_doc in wiki_docs:
vector_index = | VectorStoreIndex.from_documents([wiki_doc]) | llama_index.core.VectorStoreIndex.from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-epsilla')
get_ipython().system('pip/pip3 install pyepsilla')
get_ipython().system('pip install llama-index')
import logging
import sys
from llama_index.core import SimpleDirectoryReader, Document, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.epsilla import EpsillaVectorStore
import textwrap
import openai
import getpass
OPENAI_API_KEY = getpass.getpass("OpenAI API Key:")
openai.api_key = OPENAI_API_KEY
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(f"Total documents: {len(documents)}")
print(f"First document, id: {documents[0].doc_id}")
print(f"First document, hash: {documents[0].hash}")
from pyepsilla import vectordb
client = vectordb.Client()
vector_store = EpsillaVectorStore(client=client, db_path="/tmp/llamastore")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("Who is the author?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("How did the author learn about AI?")
print(textwrap.fill(str(response), 100))
vector_store = EpsillaVectorStore(client=client, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
single_doc = Document(text="Epsilla is the vector database we are using.")
index = VectorStoreIndex.from_documents(
[single_doc],
storage_context=storage_context,
)
query_engine = index.as_query_engine()
response = query_engine.query("Who is the author?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What vector database is being used?")
print(textwrap.fill(str(response), 100))
vector_store = EpsillaVectorStore(client=client, overwrite=False)
index = | VectorStoreIndex.from_vector_store(vector_store=vector_store) | llama_index.core.VectorStoreIndex.from_vector_store |
get_ipython().run_line_magic('pip', 'install llama-index-callbacks-wandb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import os
from getpass import getpass
if os.getenv("OPENAI_API_KEY") is None:
os.environ["OPENAI_API_KEY"] = getpass(
"Paste your OpenAI key from:"
" https://platform.openai.com/account/api-keys\n"
)
assert os.getenv("OPENAI_API_KEY", "").startswith(
"sk-"
), "This doesn't look like a valid OpenAI API key"
print("OpenAI API key configured")
from llama_index.core.callbacks import CallbackManager
from llama_index.core.callbacks import LlamaDebugHandler
from llama_index.callbacks.wandb import WandbCallbackHandler
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
SimpleKeywordTableIndex,
StorageContext,
)
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-4", temperature=0)
import llama_index.core
from llama_index.core import set_global_handler
| set_global_handler("wandb", run_args={"project": "llamaindex"}) | llama_index.core.set_global_handler |
get_ipython().run_line_magic('pip', 'install llama-index-readers-twitter')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
get_ipython().system('pip install llama-index')
from llama_index.core import VectorStoreIndex
from llama_index.readers.twitter import TwitterTweetReader
from IPython.display import Markdown, display
import os
BEARER_TOKEN = "<bearer_token>"
reader = | TwitterTweetReader(BEARER_TOKEN) | llama_index.readers.twitter.TwitterTweetReader |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('mkdir -p data')
get_ipython().system('echo "This is a test file: one!" > data/test1.txt')
get_ipython().system('echo "This is a test file: two!" > data/test2.txt')
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data", filename_as_id=True).load_data()
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.core.node_parser import SentenceSplitter
pipeline = IngestionPipeline(
transformations=[
SentenceSplitter(),
HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5"),
],
docstore=SimpleDocumentStore(),
)
nodes = pipeline.run(documents=documents)
print(f"Ingested {len(nodes)} Nodes")
pipeline.persist("./pipeline_storage")
pipeline = IngestionPipeline(
transformations=[
SentenceSplitter(),
| HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5") | llama_index.embeddings.huggingface.HuggingFaceEmbedding |
get_ipython().run_line_magic('pip', 'install llama-index-llms-fireworks')
get_ipython().run_line_magic('pip', 'install llama-index')
from llama_index.llms.fireworks import Fireworks
resp = Fireworks().complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.fireworks import Fireworks
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = Fireworks().chat(messages)
print(resp)
from llama_index.llms.fireworks import Fireworks
llm = Fireworks()
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
from llama_index.llms.fireworks import Fireworks
from llama_index.core.llms import ChatMessage
llm = Fireworks()
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = llm.stream_chat(messages)
for r in resp:
print(r.delta, end="")
from llama_index.llms.fireworks import Fireworks
llm = | Fireworks(model="accounts/fireworks/models/firefunction-v1") | llama_index.llms.fireworks.Fireworks |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.2)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(vector_store_query_mode="mmr")
response = query_engine.query("What did the author do growing up?")
print(response)
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = | SimpleDirectoryReader("./data/paul_graham/") | llama_index.core.SimpleDirectoryReader |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere')
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('pip install "google-generativeai" -q')
from llama_index.core.llama_dataset import download_llama_dataset
evaluator_dataset, _ = download_llama_dataset(
"MiniMtBenchSingleGradingDataset", "./mini_mt_bench_data"
)
evaluator_dataset.to_pandas()[:5]
from llama_index.core.evaluation import CorrectnessEvaluator
from llama_index.llms.openai import OpenAI
from llama_index.llms.gemini import Gemini
from llama_index.llms.cohere import Cohere
llm_gpt4 = | OpenAI(temperature=0, model="gpt-4") | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-neo4jvector')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "OPENAI_API_KEY"
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.vector_stores.neo4jvector import Neo4jVectorStore
username = "neo4j"
password = "pleaseletmein"
url = "bolt://localhost:7687"
embed_dim = 1536
neo4j_vector = Neo4jVectorStore(username, password, url, embed_dim)
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults(vector_store=neo4j_vector)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What happened at interleaf?")
display(Markdown(f"<b>{response}</b>"))
neo4j_vector_hybrid = Neo4jVectorStore(
username, password, url, embed_dim, hybrid_search=True
)
storage_context = StorageContext.from_defaults(
vector_store=neo4j_vector_hybrid
)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What happened at interleaf?")
display(Markdown(f"<b>{response}</b>"))
index_name = "existing_index"
text_node_property = "text"
existing_vector = Neo4jVectorStore(
username,
password,
url,
embed_dim,
index_name=index_name,
text_node_property=text_node_property,
)
loaded_index = | VectorStoreIndex.from_vector_store(existing_vector) | llama_index.core.VectorStoreIndex.from_vector_store |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=YOUR_OPENAI_KEY')
get_ipython().system('pip install llama-index pypdf')
get_ipython().system("mkdir -p 'data/'")
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-small-en")
llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
base_retriever = base_index.as_retriever(similarity_top_k=2)
retrievals = base_retriever.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for n in retrievals:
display_source_node(n, source_length=1500)
query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm)
response = query_engine_base.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
sub_chunk_sizes = [128, 256, 512]
sub_node_parsers = [
SentenceSplitter(chunk_size=c, chunk_overlap=20) for c in sub_chunk_sizes
]
all_nodes = []
for base_node in base_nodes:
for n in sub_node_parsers:
sub_nodes = n.get_nodes_from_documents([base_node])
sub_inodes = [
IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes
]
all_nodes.extend(sub_inodes)
original_node = IndexNode.from_text_node(base_node, base_node.node_id)
all_nodes.append(original_node)
all_nodes_dict = {n.node_id: n for n in all_nodes}
vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model)
vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2)
retriever_chunk = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_chunk},
node_dict=all_nodes_dict,
verbose=True,
)
nodes = retriever_chunk.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for node in nodes:
| display_source_node(node, source_length=2000) | llama_index.core.response.notebook_utils.display_source_node |
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.core import SummaryIndex
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
from llama_index.core import Settings
Settings.chunk_size = 1024
nodes = Settings.node_parser.get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
summary_index = | SummaryIndex(nodes, storage_context=storage_context) | llama_index.core.SummaryIndex |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
llm = OpenAI(model="gpt-4")
splitter = | SentenceSplitter(chunk_size=1024) | llama_index.core.node_parser.SentenceSplitter |
get_ipython().run_line_magic('pip', 'install llama-index-llms-mistralai')
get_ipython().system('pip install llama-index')
from llama_index.llms.mistralai import MistralAI
llm = MistralAI()
resp = llm.complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.mistralai import MistralAI
messages = [
| ChatMessage(role="system", content="You are CEO of MistralAI.") | llama_index.core.llms.ChatMessage |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
from llama_index.core.storage.docstore import SimpleDocumentStore
docstore = SimpleDocumentStore()
docstore.add_documents(nodes)
from llama_index.core import StorageContext
storage_context = | StorageContext.from_defaults(docstore=docstore) | llama_index.core.StorageContext.from_defaults |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index')
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.evaluation.benchmarks import BeirEvaluator
from llama_index.core import VectorStoreIndex
def create_retriever(documents):
embed_model = | HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5") | llama_index.embeddings.huggingface.HuggingFaceEmbedding |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response
from llama_index.llms.openai import OpenAI
from llama_index.core.evaluation import (
FaithfulnessEvaluator,
RelevancyEvaluator,
CorrectnessEvaluator,
)
from llama_index.core.node_parser import SentenceSplitter
import pandas as pd
pd.set_option("display.max_colwidth", 0)
gpt4 = OpenAI(temperature=0, model="gpt-4")
faithfulness_gpt4 = FaithfulnessEvaluator(llm=gpt4)
relevancy_gpt4 = RelevancyEvaluator(llm=gpt4)
correctness_gpt4 = CorrectnessEvaluator(llm=gpt4)
documents = SimpleDirectoryReader("./test_wiki_data/").load_data()
llm = | OpenAI(temperature=0.3, model="gpt-3.5-turbo") | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-milvus')
get_ipython().system(' pip install llama-index')
import logging
import sys
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Document
from llama_index.vector_stores.milvus import MilvusVectorStore
from IPython.display import Markdown, display
import textwrap
import openai
openai.api_key = "sk-"
get_ipython().system(" mkdir -p 'data/paul_graham/'")
get_ipython().system(" wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = | SimpleDirectoryReader("./data/paul_graham/") | llama_index.core.SimpleDirectoryReader |
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-cohere')
get_ipython().system('pip install llama-index cohere pypdf')
openai_api_key = "YOUR OPENAI API KEY"
cohere_api_key = "YOUR COHEREAI API KEY"
import os
os.environ["OPENAI_API_KEY"] = openai_api_key
os.environ["COHERE_API_KEY"] = cohere_api_key
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.cohere import CohereEmbedding
from llama_index.core.retrievers import BaseRetriever, VectorIndexRetriever
from llama_index.core import QueryBundle
from llama_index.core.indices.query.schema import QueryType
from llama_index.core.schema import NodeWithScore
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.evaluation import EmbeddingQAFinetuneDataset
from llama_index.finetuning import generate_cohere_reranker_finetuning_dataset
from llama_index.core.evaluation import generate_question_context_pairs
from llama_index.core.evaluation import RetrieverEvaluator
from llama_index.finetuning import CohereRerankerFinetuneEngine
from typing import List
import pandas as pd
import nest_asyncio
nest_asyncio.apply()
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'")
lyft_docs = SimpleDirectoryReader(
input_files=["./data/10k/lyft_2021.pdf"]
).load_data()
uber_docs = SimpleDirectoryReader(
input_files=["./data/10k/uber_2021.pdf"]
).load_data()
node_parser = SimpleNodeParser.from_defaults(chunk_size=400)
lyft_nodes = node_parser.get_nodes_from_documents(lyft_docs)
uber_nodes = node_parser.get_nodes_from_documents(uber_docs)
llm = OpenAI(temperature=0, model="gpt-4")
qa_generate_prompt_tmpl = """\
Context information is below.
---------------------
{context_str}
---------------------
Given the context information and not prior knowledge.
generate only questions based on the below query.
You are a Professor. Your task is to setup \
{num_questions_per_chunk} questions for an upcoming \
quiz/examination. The questions should be diverse in nature \
across the document. The questions should not contain options, not start with Q1/ Q2. \
Restrict the questions to the context information provided.\
"""
qa_dataset_lyft_train = generate_question_context_pairs(
lyft_nodes[:256],
llm=llm,
num_questions_per_chunk=1,
qa_generate_prompt_tmpl=qa_generate_prompt_tmpl,
)
qa_dataset_lyft_train.save_json("lyft_train_dataset.json")
qa_dataset_lyft_val = generate_question_context_pairs(
lyft_nodes[257:321],
llm=llm,
num_questions_per_chunk=1,
qa_generate_prompt_tmpl=qa_generate_prompt_tmpl,
)
qa_dataset_lyft_val.save_json("lyft_val_dataset.json")
qa_dataset_uber_val = generate_question_context_pairs(
uber_nodes[:150],
llm=llm,
num_questions_per_chunk=1,
qa_generate_prompt_tmpl=qa_generate_prompt_tmpl,
)
qa_dataset_uber_val.save_json("uber_val_dataset.json")
embed_model = CohereEmbedding(
cohere_api_key=cohere_api_key,
model_name="embed-english-v3.0",
input_type="search_document",
)
generate_cohere_reranker_finetuning_dataset(
qa_dataset_lyft_train, finetune_dataset_file_name="train.jsonl"
)
generate_cohere_reranker_finetuning_dataset(
qa_dataset_lyft_val, finetune_dataset_file_name="val.jsonl"
)
generate_cohere_reranker_finetuning_dataset(
qa_dataset_lyft_train,
num_negatives=5,
hard_negatives_gen_method="random",
finetune_dataset_file_name="train_5_random.jsonl",
embed_model=embed_model,
)
generate_cohere_reranker_finetuning_dataset(
qa_dataset_lyft_val,
num_negatives=5,
hard_negatives_gen_method="random",
finetune_dataset_file_name="val_5_random.jsonl",
embed_model=embed_model,
)
generate_cohere_reranker_finetuning_dataset(
qa_dataset_lyft_train,
num_negatives=5,
hard_negatives_gen_method="cosine_similarity",
finetune_dataset_file_name="train_5_cosine_similarity.jsonl",
embed_model=embed_model,
)
generate_cohere_reranker_finetuning_dataset(
qa_dataset_lyft_val,
num_negatives=5,
hard_negatives_gen_method="cosine_similarity",
finetune_dataset_file_name="val_5_cosine_similarity.jsonl",
embed_model=embed_model,
)
finetune_model_no_hard_negatives = CohereRerankerFinetuneEngine(
train_file_name="train.jsonl",
val_file_name="val.jsonl",
model_name="lyft_reranker_0_hard_negatives",
model_type="RERANK",
base_model="english",
)
finetune_model_no_hard_negatives.finetune()
finetune_model_random_hard_negatives = CohereRerankerFinetuneEngine(
train_file_name="train_5_random.jsonl",
val_file_name="val_5_random.jsonl",
model_name="lyft_reranker_5_random_hard_negatives",
model_type="RERANK",
base_model="english",
)
finetune_model_random_hard_negatives.finetune()
finetune_model_cosine_hard_negatives = CohereRerankerFinetuneEngine(
train_file_name="train_5_cosine_similarity.jsonl",
val_file_name="val_5_cosine_similarity.jsonl",
model_name="lyft_reranker_5_cosine_hard_negatives",
model_type="RERANK",
base_model="english",
)
finetune_model_cosine_hard_negatives.finetune()
reranker_base = CohereRerank(top_n=5)
reranker_model_0 = finetune_model_no_hard_negatives.get_finetuned_model(
top_n=5
)
reranker_model_5_random = (
finetune_model_random_hard_negatives.get_finetuned_model(top_n=5)
)
reranker_model_5_cosine = (
finetune_model_cosine_hard_negatives.get_finetuned_model(top_n=5)
)
RERANKERS = {
"WithoutReranker": "None",
"CohereRerank": reranker_base,
"CohereRerank_0": reranker_model_0,
"CohereRerank_5_random": reranker_model_5_random,
"CohereRerank_5_cosine": reranker_model_5_cosine,
}
def display_results(embedding_name, reranker_name, eval_results):
"""Display results from evaluate."""
metric_dicts = []
for eval_result in eval_results:
metric_dict = eval_result.metric_vals_dict
metric_dicts.append(metric_dict)
full_df = pd.DataFrame(metric_dicts)
hit_rate = full_df["hit_rate"].mean()
mrr = full_df["mrr"].mean()
metric_df = pd.DataFrame(
{
"Embedding": [embedding_name],
"Reranker": [reranker_name],
"hit_rate": [hit_rate],
"mrr": [mrr],
}
)
return metric_df
index_embed_model = CohereEmbedding(
cohere_api_key=cohere_api_key,
model_name="embed-english-v3.0",
input_type="search_document",
)
query_embed_model = CohereEmbedding(
cohere_api_key=cohere_api_key,
model_name="embed-english-v3.0",
input_type="search_query",
)
vector_index = VectorStoreIndex(
uber_nodes[:150],
embed_model=index_embed_model,
)
vector_retriever = VectorIndexRetriever(
index=vector_index,
similarity_top_k=10,
embed_model=query_embed_model,
)
results_df = pd.DataFrame()
embed_name = "CohereEmbedding"
for rerank_name, reranker in RERANKERS.items():
print(f"Running Evaluation for Reranker: {rerank_name}")
class CustomRetriever(BaseRetriever):
"""Custom retriever that performs both Vector search and Knowledge Graph search"""
def __init__(
self,
vector_retriever: VectorIndexRetriever,
) -> None:
"""Init params."""
self._vector_retriever = vector_retriever
super().__init__()
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve nodes given query."""
retrieved_nodes = self._vector_retriever.retrieve(query_bundle)
if reranker != "None":
retrieved_nodes = reranker.postprocess_nodes(
retrieved_nodes, query_bundle
)
else:
retrieved_nodes = retrieved_nodes[:5]
return retrieved_nodes
async def _aretrieve(
self, query_bundle: QueryBundle
) -> List[NodeWithScore]:
"""Asynchronously retrieve nodes given query.
Implemented by the user.
"""
return self._retrieve(query_bundle)
async def aretrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
if isinstance(str_or_query_bundle, str):
str_or_query_bundle = | QueryBundle(str_or_query_bundle) | llama_index.core.QueryBundle |
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf')
from llama_index.core import SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
from llama_index.core.evaluation import DatasetGenerator
documents = SimpleDirectoryReader(
input_files=["IPCC_AR6_WGII_Chapter03.pdf"]
).load_data()
import random
random.seed(42)
random.shuffle(documents)
gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
question_gen_query = (
"You are a Teacher/ Professor. Your task is to setup "
"a quiz/examination. Using the provided context from a "
"report on climate change and the oceans, formulate "
"a single question that captures an important fact from the "
"context. Restrict the question to the context information provided."
)
dataset_generator = DatasetGenerator.from_documents(
documents[:50],
question_gen_query=question_gen_query,
llm=gpt_35_llm,
)
questions = dataset_generator.generate_questions_from_nodes(num=40)
print("Generated ", len(questions), " questions")
with open("train_questions.txt", "w") as f:
for question in questions:
f.write(question + "\n")
dataset_generator = DatasetGenerator.from_documents(
documents[
50:
], # since we generated ~1 question for 40 documents, we can skip the first 40
question_gen_query=question_gen_query,
llm=gpt_35_llm,
)
questions = dataset_generator.generate_questions_from_nodes(num=40)
print("Generated ", len(questions), " questions")
with open("eval_questions.txt", "w") as f:
for question in questions:
f.write(question + "\n")
questions = []
with open("eval_questions.txt", "r") as f:
for line in f:
questions.append(line.strip())
from llama_index.core import VectorStoreIndex, Settings
Settings.context_window = 2048
gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(similarity_top_k=2, llm=gpt_35_llm)
contexts = []
answers = []
for question in questions:
response = query_engine.query(question)
contexts.append([x.node.get_content() for x in response.source_nodes])
answers.append(str(response))
from datasets import Dataset
from ragas import evaluate
from ragas.metrics import answer_relevancy, faithfulness
ds = Dataset.from_dict(
{
"question": questions,
"answer": answers,
"contexts": contexts,
}
)
result = evaluate(ds, [answer_relevancy, faithfulness])
print(result)
from llama_index.llms.openai import OpenAI
from llama_index.core.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
finetuning_handler = OpenAIFineTuningHandler()
callback_manager = CallbackManager([finetuning_handler])
llm = OpenAI(model="gpt-4", temperature=0.3)
Settings.callback_manager = (callback_manager,)
questions = []
with open("train_questions.txt", "r") as f:
for line in f:
questions.append(line.strip())
from llama_index.core import VectorStoreIndex
index = | VectorStoreIndex.from_documents(documents) | llama_index.core.VectorStoreIndex.from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-4")
Settings.embed_model = | OpenAIEmbedding(model="text-embedding-3-small") | llama_index.embeddings.openai.OpenAIEmbedding |
get_ipython().run_line_magic('pip', 'install llama-index llama-index-vector-stores-qdrant -q')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('mkdir data')
get_ipython().system('wget "https://arxiv.org/pdf/2402.09353.pdf" -O "./data/dorav1.pdf"')
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4")
response = llm.complete("What is DoRA?")
print(response.text)
"""Load the data.
With llama-index, before any transformations are applied,
data is loaded in the `Document` abstraction, which is
a container that holds the text of the document.
"""
from llama_index.core import SimpleDirectoryReader
loader = SimpleDirectoryReader(input_dir="./data")
documents = loader.load_data()
"""Chunk, Encode, and Store into a Vector Store.
To streamline the process, we can make use of the IngestionPipeline
class that will apply your specified transformations to the
Document's.
"""
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.qdrant import QdrantVectorStore
import qdrant_client
client = qdrant_client.QdrantClient(location=":memory:")
vector_store = QdrantVectorStore(client=client, collection_name="test_store")
pipeline = IngestionPipeline(
transformations=[
SentenceSplitter(),
OpenAIEmbedding(),
],
vector_store=vector_store,
)
_nodes = pipeline.run(documents=documents, num_workers=4)
"""Create a llama-index... wait for it... Index.
After uploading your encoded documents into your vector
store of choice, you can connect to it with a VectorStoreIndex
which then gives you access to all of the llama-index functionality.
"""
from llama_index.core import VectorStoreIndex
index = | VectorStoreIndex.from_vector_store(vector_store=vector_store) | llama_index.core.VectorStoreIndex.from_vector_store |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core.llama_dataset import download_llama_dataset
rag_dataset, documents = download_llama_dataset(
"PaulGrahamEssayDataset", "./paul_graham"
)
rag_dataset.to_pandas()[:5]
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
import nest_asyncio
nest_asyncio.apply()
prediction_dataset = await rag_dataset.amake_predictions_with(
query_engine=query_engine, show_progress=True
)
prediction_dataset.to_pandas()[:5]
import tqdm
from llama_index.llms.openai import OpenAI
from llama_index.core.evaluation import (
CorrectnessEvaluator,
FaithfulnessEvaluator,
RelevancyEvaluator,
SemanticSimilarityEvaluator,
)
judges = {}
judges["correctness"] = CorrectnessEvaluator(
llm= | OpenAI(temperature=0, model="gpt-4") | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-tables-chain-of-table-base')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/200-csv/3.csv")
df
from llama_index.packs.tables.chain_of_table.base import (
ChainOfTableQueryEngine,
serialize_table,
)
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"ChainOfTablePack",
"./chain_of_table_pack",
skip_load=True,
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-1106-preview")
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
import pandas as pd
df = pd.read_csv("~/Downloads/WikiTableQuestions/csv/200-csv/11.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
response = query_engine.query("Who won best Director in the 1972 Academy Awards?")
str(response.response)
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/200-csv/42.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
response = query_engine.query("What was the precipitation in inches during June?")
str(response)
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
prompt_str = """\
Here's a serialized table.
{serialized_table}
Given this table please answer the question: {question}
Answer: """
prompt = PromptTemplate(prompt_str)
prompt_c = prompt.as_query_component(partial={"serialized_table": serialize_table(df)})
qp = QueryPipeline(chain=[prompt_c, llm])
response = qp.run("What was the precipitation in inches during June?")
print(str(response))
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/203-csv/114.csv")
df
query_engine = | ChainOfTableQueryEngine(df, llm=llm, verbose=True) | llama_index.packs.tables.chain_of_table.base.ChainOfTableQueryEngine |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
MONGO_URI = os.environ["MONGO_URI"]
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.storage.index_store.mongodb import MongoIndexStore
storage_context = StorageContext.from_defaults(
docstore=MongoDocumentStore.from_uri(uri=MONGO_URI),
index_store=MongoIndexStore.from_uri(uri=MONGO_URI),
)
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
nodes, storage_context=storage_context
)
len(storage_context.docstore.docs)
storage_context.persist()
list_id = summary_index.index_id
vector_id = vector_index.index_id
keyword_id = keyword_table_index.index_id
from llama_index.core import load_index_from_storage
storage_context = StorageContext.from_defaults(
docstore=MongoDocumentStore.from_uri(uri=MONGO_URI),
index_store=MongoIndexStore.from_uri(uri=MONGO_URI),
)
summary_index = load_index_from_storage(
storage_context=storage_context, index_id=list_id
)
vector_index = load_index_from_storage(
storage_context=storage_context, vector_id=vector_id
)
keyword_table_index = load_index_from_storage(
storage_context=storage_context, keyword_id=keyword_id
)
chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.llm = chatgpt
Settings.chunk_size = 1024
query_engine = summary_index.as_query_engine()
list_response = query_engine.query("What is a summary of this document?")
display_response(list_response)
query_engine = vector_index.as_query_engine()
vector_response = query_engine.query("What did the author do growing up?")
display_response(vector_response)
query_engine = keyword_table_index.as_query_engine()
keyword_response = query_engine.query(
"What did the author do after his time at YC?"
)
| display_response(keyword_response) | llama_index.core.response.notebook_utils.display_response |
import os
print(os.listdir("./discord_dumps"))
import json
with open("./discord_dumps/help_channel_dump_05_25_23.json", "r") as f:
data = json.load(f)
print("JSON keys: ", data.keys(), "\n")
print("Message Count: ", len(data["messages"]), "\n")
print("Sample Message Keys: ", data["messages"][0].keys(), "\n")
print("First Message: ", data["messages"][0]["content"], "\n")
print("Last Message: ", data["messages"][-1]["content"])
get_ipython().system('python ./group_conversations.py ./discord_dumps/help_channel_dump_05_25_23.json')
with open("conversation_docs.json", "r") as f:
threads = json.load(f)
print("Thread keys: ", threads[0].keys(), "\n")
print(threads[0]["metadata"], "\n")
print(threads[0]["thread"], "\n")
from llama_index.core import Document
documents = []
for thread in threads:
thread_text = thread["thread"]
thread_id = thread["metadata"]["id"]
timestamp = thread["metadata"]["timestamp"]
documents.append(
Document(text=thread_text, id_=thread_id, metadata={"date": timestamp})
)
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents)
print("ref_docs ingested: ", len(index.ref_doc_info))
print("number of input documents: ", len(documents))
thread_id = threads[0]["metadata"]["id"]
print(index.ref_doc_info[thread_id])
index.storage_context.persist(persist_dir="./storage")
from llama_index.core import StorageContext, load_index_from_storage
index = load_index_from_storage(
| StorageContext.from_defaults(persist_dir="./storage") | llama_index.core.StorageContext.from_defaults |
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().system('pip install -q llama-index google-generativeai')
get_ipython().run_line_magic('env', 'GOOGLE_API_KEY=...')
import os
GOOGLE_API_KEY = "" # add your GOOGLE API key here
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from llama_index.llms.gemini import Gemini
resp = Gemini().complete("Write a poem about a magic backpack")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.gemini import Gemini
messages = [
ChatMessage(role="user", content="Hello friend!"),
ChatMessage(role="assistant", content="Yarr what is shakin' matey?"),
ChatMessage(
role="user", content="Help me decide what to have for dinner."
),
]
resp = Gemini().chat(messages)
print(resp)
from llama_index.llms.gemini import Gemini
llm = Gemini()
resp = llm.stream_complete(
"The story of Sourcrust, the bread creature, is really interesting. It all started when..."
)
for r in resp:
print(r.text, end="")
from llama_index.llms.gemini import Gemini
from llama_index.core.llms import ChatMessage
llm = Gemini()
messages = [
ChatMessage(role="user", content="Hello friend!"),
ChatMessage(role="assistant", content="Yarr what is shakin' matey?"),
ChatMessage(
role="user", content="Help me decide what to have for dinner."
),
]
resp = llm.stream_chat(messages)
for r in resp:
print(r.delta, end="")
import google.generativeai as genai
for m in genai.list_models():
if "generateContent" in m.supported_generation_methods:
print(m.name)
from llama_index.llms.gemini import Gemini
llm = Gemini(model="models/gemini-pro")
resp = llm.complete("Write a short, but joyous, ode to LlamaIndex")
print(resp)
from llama_index.llms.gemini import Gemini
llm = | Gemini() | llama_index.llms.gemini.Gemini |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core.callbacks import (
CallbackManager,
LlamaDebugHandler,
CBEventType,
)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
docs = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo", temperature=0)
llama_debug = | LlamaDebugHandler(print_trace_on_end=True) | llama_index.core.callbacks.LlamaDebugHandler |
get_ipython().system('pip install llama-index')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.core.query_engine import TransformQueryEngine
from IPython.display import Markdown, display
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_str = "what did paul graham do after going to RISD"
query_engine = index.as_query_engine()
response = query_engine.query(query_str)
display(Markdown(f"<b>{response}</b>"))
hyde = | HyDEQueryTransform(include_original=True) | llama_index.core.indices.query.query_transform.HyDEQueryTransform |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import os
import getpass
import openai
openai.api_key = "sk-"
import chromadb
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection("quickstart")
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore
from IPython.display import Markdown, display
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"author": "Harper Lee",
"theme": "Mafia",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
from llama_index.core import StorageContext
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.vector_stores import (
MetadataFilter,
MetadataFilters,
FilterOperator,
)
filters = MetadataFilters(
filters=[
MetadataFilter(key="theme", operator=FilterOperator.EQ, value="Mafia"),
]
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("What is inception about?")
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
filters = MetadataFilters(
filters=[
MetadataFilter(key="theme", value="Mafia"),
MetadataFilter(key="year", value=1972),
]
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("What is inception about?")
from llama_index.core.vector_stores import FilterOperator, FilterCondition
filters = MetadataFilters(
filters=[
| MetadataFilter(key="theme", value="Fiction") | llama_index.core.vector_stores.MetadataFilter |
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader(
input_files=["./data/paul_graham/paul_graham_essay.txt"]
)
docs = reader.load_data()
text = docs[0].text
from llama_index.core import PromptTemplate
qa_prompt_tmpl = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query.\n"
"Please also write the answer in the style of {tone_name}.\n"
"Query: {query_str}\n"
"Answer: "
)
qa_prompt = PromptTemplate(qa_prompt_tmpl)
refine_prompt_tmpl = (
"The original query is as follows: {query_str}\n"
"We have provided an existing answer: {existing_answer}\n"
"We have the opportunity to refine the existing answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"Given the new context, refine the original answer to better "
"answer the query. "
"Please also write the answer in the style of {tone_name}.\n"
"If the context isn't useful, return the original answer.\n"
"Refined Answer: "
)
refine_prompt = PromptTemplate(refine_prompt_tmpl)
from llama_index.core.response_synthesizers import TreeSummarize, Refine
from llama_index.core.types import BaseModel
from typing import List
summarizer = | TreeSummarize(verbose=True, summary_template=qa_prompt) | llama_index.core.response_synthesizers.TreeSummarize |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().system('pip install llama-index weaviate-client')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-<your key here>"
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import weaviate
resource_owner_config = weaviate.AuthClientPassword(
username="",
password="",
)
client = weaviate.Client(
"https://test.weaviate.network",
auth_client_secret=resource_owner_config,
)
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.weaviate import WeaviateVectorStore
from IPython.display import Markdown, display
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"author": "Harper Lee",
"theme": "Mafia",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
from llama_index.core import StorageContext
vector_store = WeaviateVectorStore(
weaviate_client=client, index_name="LlamaIndex_filter"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
retriever = index.as_retriever()
retriever.retrieve("What is inception?")
from llama_index.core.vector_stores import (
MetadataFilter,
MetadataFilters,
FilterOperator,
)
filters = MetadataFilters(
filters=[
MetadataFilter(key="theme", operator=FilterOperator.EQ, value="Mafia"),
]
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("What is inception about?")
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
filters = MetadataFilters(
filters=[
MetadataFilter(key="theme", value="Mafia"),
MetadataFilter(key="year", value=1972),
]
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("What is inception?")
from llama_index.core.vector_stores import FilterOperator, FilterCondition
filters = MetadataFilters(
filters=[
MetadataFilter(key="theme", value="Fiction"),
| MetadataFilter(key="year", value=1997, operator=FilterOperator.GT) | llama_index.core.vector_stores.MetadataFilter |
get_ipython().run_line_magic('pip', 'install llama-index-llms-monsterapi')
get_ipython().system('python3 -m pip install llama-index --quiet -y')
get_ipython().system('python3 -m pip install monsterapi --quiet')
get_ipython().system('python3 -m pip install sentence_transformers --quiet')
import os
from llama_index.llms.monsterapi import MonsterLLM
from llama_index.core.embeddings import resolve_embed_model
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
os.environ["MONSTER_API_KEY"] = ""
model = "llama2-7b-chat"
llm = MonsterLLM(model=model, temperature=0.75)
result = llm.complete("Who are you?")
print(result)
from llama_index.core.llms import ChatMessage
history_message = ChatMessage(
**{
"role": "user",
"content": (
"When asked 'who are you?' respond as 'I am qblocks llm model'"
" everytime."
),
}
)
current_message = | ChatMessage(**{"role": "user", "content": "Who are you?"}) | llama_index.core.llms.ChatMessage |
get_ipython().system('pip install llama-index')
import openai
import os
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
openai.api_key = os.environ["OPENAI_API_KEY"]
from typing import Any, List
from InstructorEmbedding import INSTRUCTOR
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.embeddings import BaseEmbedding
class InstructorEmbeddings(BaseEmbedding):
_model: INSTRUCTOR = PrivateAttr()
_instruction: str = PrivateAttr()
def __init__(
self,
instructor_model_name: str = "hkunlp/instructor-large",
instruction: str = "Represent a document for semantic search:",
**kwargs: Any,
) -> None:
self._model = INSTRUCTOR(instructor_model_name)
self._instruction = instruction
super().__init__(**kwargs)
@classmethod
def class_name(cls) -> str:
return "instructor"
async def _aget_query_embedding(self, query: str) -> List[float]:
return self._get_query_embedding(query)
async def _aget_text_embedding(self, text: str) -> List[float]:
return self._get_text_embedding(text)
def _get_query_embedding(self, query: str) -> List[float]:
embeddings = self._model.encode([[self._instruction, query]])
return embeddings[0]
def _get_text_embedding(self, text: str) -> List[float]:
embeddings = self._model.encode([[self._instruction, text]])
return embeddings[0]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
embeddings = self._model.encode(
[[self._instruction, text] for text in texts]
)
return embeddings
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
embed_model = InstructorEmbeddings(embed_batch_size=2)
Settings.embed_model = embed_model
Settings.chunk_size = 512
index = | VectorStoreIndex.from_documents(documents) | llama_index.core.VectorStoreIndex.from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-epsilla')
get_ipython().system('pip/pip3 install pyepsilla')
get_ipython().system('pip install llama-index')
import logging
import sys
from llama_index.core import SimpleDirectoryReader, Document, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.epsilla import EpsillaVectorStore
import textwrap
import openai
import getpass
OPENAI_API_KEY = getpass.getpass("OpenAI API Key:")
openai.api_key = OPENAI_API_KEY
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(f"Total documents: {len(documents)}")
print(f"First document, id: {documents[0].doc_id}")
print(f"First document, hash: {documents[0].hash}")
from pyepsilla import vectordb
client = vectordb.Client()
vector_store = EpsillaVectorStore(client=client, db_path="/tmp/llamastore")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("Who is the author?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("How did the author learn about AI?")
print(textwrap.fill(str(response), 100))
vector_store = EpsillaVectorStore(client=client, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
single_doc = Document(text="Epsilla is the vector database we are using.")
index = VectorStoreIndex.from_documents(
[single_doc],
storage_context=storage_context,
)
query_engine = index.as_query_engine()
response = query_engine.query("Who is the author?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What vector database is being used?")
print(textwrap.fill(str(response), 100))
vector_store = | EpsillaVectorStore(client=client, overwrite=False) | llama_index.vector_stores.epsilla.EpsillaVectorStore |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
get_ipython().system('pip install llama_hub')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
docs0 = PyMuPDFReader().load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
index = VectorStoreIndex(base_nodes)
query_engine = index.as_query_engine(similarity_top_k=2)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/fh9vsmmm8vu0j50l3ss38/llama2_eval_qr_dataset.json?rlkey=kkoaez7aqeb4z25gzc06ak6kb&dl=1" -O data/llama2_eval_qr_dataset.json')
from llama_index.core.evaluation import QueryResponseDataset
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
from llama_index.core.evaluation.eval_utils import get_responses
from llama_index.core.evaluation import CorrectnessEvaluator, BatchEvalRunner
evaluator_c = CorrectnessEvaluator()
evaluator_dict = {"correctness": evaluator_c}
batch_runner = BatchEvalRunner(evaluator_dict, workers=2, show_progress=True)
import numpy as np
async def get_correctness(query_engine, eval_qa_pairs, batch_runner):
eval_qs = [q for q, _ in eval_qa_pairs]
eval_answers = [a for _, a in eval_qa_pairs]
pred_responses = get_responses(eval_qs, query_engine, show_progress=True)
eval_results = await batch_runner.aevaluate_responses(
eval_qs, responses=pred_responses, reference=eval_answers
)
avg_correctness = np.array(
[r.score for r in eval_results["correctness"]]
).mean()
return avg_correctness
emotion_stimuli_dict = {
"ep01": "Write your answer and give me a confidence score between 0-1 for your answer. ",
"ep02": "This is very important to my career. ",
"ep03": "You'd better be sure.",
}
emotion_stimuli_dict["ep06"] = (
emotion_stimuli_dict["ep01"]
+ emotion_stimuli_dict["ep02"]
+ emotion_stimuli_dict["ep03"]
)
QA_PROMPT_KEY = "response_synthesizer:text_qa_template"
from llama_index.core import PromptTemplate
qa_tmpl_str = """\
Context information is below.
---------------------
{context_str}
---------------------
Given the context information and not prior knowledge, \
answer the query.
{emotion_str}
Query: {query_str}
Answer: \
"""
qa_tmpl = | PromptTemplate(qa_tmpl_str) | llama_index.core.PromptTemplate |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai pandas[jinja2] spacy')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
TreeIndex,
VectorStoreIndex,
SimpleDirectoryReader,
Response,
)
from llama_index.llms.openai import OpenAI
from llama_index.core.evaluation import RelevancyEvaluator
from llama_index.core.node_parser import SentenceSplitter
import pandas as pd
pd.set_option("display.max_colwidth", 0)
gpt3 = OpenAI(temperature=0, model="gpt-3.5-turbo")
gpt4 = OpenAI(temperature=0, model="gpt-4")
evaluator = RelevancyEvaluator(llm=gpt3)
evaluator_gpt4 = | RelevancyEvaluator(llm=gpt4) | llama_index.core.evaluation.RelevancyEvaluator |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import openai
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo")
data = | SimpleDirectoryReader(input_dir="./data/paul_graham/") | llama_index.core.SimpleDirectoryReader |
import cProfile, pstats
from pstats import SortKey
get_ipython().system('llamaindex-cli download-llamadataset PatronusAIFinanceBenchDataset --download-dir ./data')
from llama_index.core import SimpleDirectoryReader
reader = | SimpleDirectoryReader(input_dir="./data/source_files") | llama_index.core.SimpleDirectoryReader |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.2)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("../data/paul_graham").load_data()
from llama_index.core import Settings
Settings.chunk_size = 1024
nodes = Settings.node_parser.get_nodes_from_documents(documents)
from llama_index.core import StorageContext
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
from llama_index.core import SummaryIndex
from llama_index.core import VectorStoreIndex
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
list_query_engine = summary_index.as_query_engine(
response_mode="tree_summarize",
use_async=True,
)
vector_query_engine = vector_index.as_query_engine()
from llama_index.core.tools import QueryEngineTool
list_tool = QueryEngineTool.from_defaults(
query_engine=list_query_engine,
description=(
"Useful for summarization questions related to Paul Graham eassy on"
" What I Worked On."
),
)
vector_tool = QueryEngineTool.from_defaults(
query_engine=vector_query_engine,
description=(
"Useful for retrieving specific context from Paul Graham essay on What"
" I Worked On."
),
)
from llama_index.core.query_engine import RouterQueryEngine
from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector
from llama_index.core.selectors import (
PydanticMultiSelector,
PydanticSingleSelector,
)
query_engine = RouterQueryEngine(
selector= | PydanticSingleSelector.from_defaults() | llama_index.core.selectors.PydanticSingleSelector.from_defaults |
from utils import get_train_str, get_train_and_eval_data, get_eval_preds, train_prompt
import warnings
warnings.filterwarnings("ignore")
warnings.simplefilter("ignore")
train_df, train_labels, eval_df, eval_labels = get_train_and_eval_data("data/train.csv")
print(train_prompt.template)
train_n = 10
eval_n = 40
train_str = get_train_str(train_df, train_labels, train_n=train_n)
print(f"Example datapoints in `train_str`: \n{train_str}")
from sklearn.metrics import accuracy_score
import numpy as np
eval_preds = get_eval_preds(train_prompt, train_str, eval_df, n=eval_n)
eval_label_chunk = eval_labels[:eval_n]
acc = accuracy_score(eval_label_chunk, np.array(eval_preds).round())
print(f"ACCURACY: {acc}")
from sklearn.metrics import accuracy_score
import numpy as np
eval_preds_null = get_eval_preds(train_prompt, "", eval_df, n=eval_n)
eval_label_chunk = eval_labels[:eval_n]
acc_null = accuracy_score(eval_label_chunk, np.array(eval_preds_null).round())
print(f"ACCURACY: {acc_null}")
from llama_index import SummaryIndex
from llama_index.schema import Document
index = SummaryIndex([])
batch_size = 40
num_train_chunks = 5
for i in range(num_train_chunks):
print(f"Inserting chunk: {i}/{num_train_chunks}")
start_idx = i * batch_size
end_idx = (i + 1) * batch_size
train_batch = train_df.iloc[start_idx : end_idx + batch_size]
labels_batch = train_labels.iloc[start_idx : end_idx + batch_size]
all_train_str = get_train_str(train_batch, labels_batch, train_n=batch_size)
index.insert( | Document(text=all_train_str) | llama_index.schema.Document |
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
Settings.callback_manager = callback_manager
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.tools import QueryEngineTool
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["albums", "tracks", "artists"],
verbose=True,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query"
),
)
from llama_index.core.query_pipeline import QueryPipeline as QP
qp = QP(verbose=True)
from llama_index.core.agent.react.types import (
ActionReasoningStep,
ObservationReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.agent import Task, AgentChatResponse
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
CustomAgentComponent,
QueryComponent,
ToolRunnerComponent,
)
from llama_index.core.llms import MessageRole
from typing import Dict, Any, Optional, Tuple, List, cast
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]:
"""Agent input function.
Returns:
A Dictionary of output keys and values. If you are specifying
src_key when defining links between this component and other
components, make sure the src_key matches the specified output_key.
"""
if "current_reasoning" not in state:
state["current_reasoning"] = []
reasoning_step = ObservationReasoningStep(observation=task.input)
state["current_reasoning"].append(reasoning_step)
return {"input": task.input}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core.agent import ReActChatFormatter
from llama_index.core.query_pipeline import InputComponent, Link
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool
def react_prompt_fn(
task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool]
) -> List[ChatMessage]:
chat_formatter = ReActChatFormatter()
return chat_formatter.format(
tools,
chat_history=task.memory.get() + state["memory"].get_all(),
current_reasoning=state["current_reasoning"],
)
react_prompt_component = AgentFnComponent(
fn=react_prompt_fn, partial_dict={"tools": [sql_tool]}
)
from typing import Set, Optional
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.llms import ChatResponse
from llama_index.core.agent.types import Task
def parse_react_output_fn(
task: Task, state: Dict[str, Any], chat_response: ChatResponse
):
"""Parse ReAct output into a reasoning step."""
output_parser = ReActOutputParser()
reasoning_step = output_parser.parse(chat_response.message.content)
return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step}
parse_react_output = AgentFnComponent(fn=parse_react_output_fn)
def run_tool_fn(
task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep
):
"""Run tool and process tool output."""
tool_runner_component = ToolRunnerComponent(
[sql_tool], callback_manager=task.callback_manager
)
tool_output = tool_runner_component.run_component(
tool_name=reasoning_step.action,
tool_input=reasoning_step.action_input,
)
observation_step = ObservationReasoningStep(observation=str(tool_output))
state["current_reasoning"].append(observation_step)
return {"response_str": observation_step.get_content(), "is_done": False}
run_tool = AgentFnComponent(fn=run_tool_fn)
def process_response_fn(
task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep
):
"""Process response."""
state["current_reasoning"].append(response_step)
response_str = response_step.response
state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER))
state["memory"].put(
ChatMessage(content=response_str, role=MessageRole.ASSISTANT)
)
return {"response_str": response_str, "is_done": True}
process_response = | AgentFnComponent(fn=process_response_fn) | llama_index.core.query_pipeline.AgentFnComponent |
get_ipython().run_line_magic('pip', 'install llama-index-readers-elasticsearch')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-opensearch')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-ollama')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from os import getenv
from llama_index.core import SimpleDirectoryReader
from llama_index.vector_stores.opensearch import (
OpensearchVectorStore,
OpensearchVectorClient,
)
from llama_index.core import VectorStoreIndex, StorageContext
endpoint = getenv("OPENSEARCH_ENDPOINT", "http://localhost:9200")
idx = getenv("OPENSEARCH_INDEX", "gpt-index-demo")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
text_field = "content"
embedding_field = "embedding"
client = OpensearchVectorClient(
endpoint, idx, 1536, embedding_field=embedding_field, text_field=text_field
)
vector_store = OpensearchVectorStore(client)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents=documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
res = query_engine.query("What did the author do growing up?")
res.response
from llama_index.core import Document
from llama_index.core.vector_stores import MetadataFilters, ExactMatchFilter
import regex as re
text_chunks = documents[0].text.split("\n\n")
footnotes = [
Document(
text=chunk,
id=documents[0].doc_id,
metadata={"is_footnote": bool(re.search(r"^\s*\[\d+\]\s*", chunk))},
)
for chunk in text_chunks
if bool(re.search(r"^\s*\[\d+\]\s*", chunk))
]
for f in footnotes:
index.insert(f)
footnote_query_engine = index.as_query_engine(
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="term", value='{"metadata.is_footnote": "true"}'
),
ExactMatchFilter(
key="query_string",
value='{"query": "content: space AND content: lisp"}',
),
]
)
)
res = footnote_query_engine.query(
"What did the author about space aliens and lisp?"
)
res.response
from llama_index.readers.elasticsearch import ElasticsearchReader
rdr = ElasticsearchReader(endpoint, idx)
docs = rdr.load_data(text_field, embedding_field=embedding_field)
print("embedding dimension:", len(docs[0].embedding))
print("all fields in index:", docs[0].metadata.keys())
print("total number of chunks created:", len(docs))
docs = rdr.load_data(text_field, {"query": {"match": {text_field: "Lisp"}}})
print("chunks that mention Lisp:", len(docs))
docs = rdr.load_data(text_field, {"query": {"match": {text_field: "Yahoo"}}})
print("chunks that mention Yahoo:", len(docs))
from os import getenv
from llama_index.vector_stores.opensearch import (
OpensearchVectorStore,
OpensearchVectorClient,
)
endpoint = getenv("OPENSEARCH_ENDPOINT", "http://localhost:9200")
idx = getenv("OPENSEARCH_INDEX", "auto_retriever_movies")
text_field = "content"
embedding_field = "embedding"
client = OpensearchVectorClient(
endpoint,
idx,
4096,
embedding_field=embedding_field,
text_field=text_field,
search_pipeline="hybrid-search-pipeline",
)
from llama_index.embeddings.ollama import OllamaEmbedding
embed_model = OllamaEmbedding(model_name="llama2")
vector_store = | OpensearchVectorStore(client) | llama_index.vector_stores.opensearch.OpensearchVectorStore |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('mkdir -p data')
get_ipython().system('echo "This is a test file: one!" > data/test1.txt')
get_ipython().system('echo "This is a test file: two!" > data/test2.txt')
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data", filename_as_id=True).load_data()
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.core.node_parser import SentenceSplitter
pipeline = IngestionPipeline(
transformations=[
SentenceSplitter(),
HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5"),
],
docstore= | SimpleDocumentStore() | llama_index.core.storage.docstore.SimpleDocumentStore |
get_ipython().run_line_magic('pip', 'install llama-index-llms-watsonx')
from llama_index.llms.watsonx import WatsonX
credentials = {
"url": "https://enter.your-ibm.url",
"apikey": "insert_your_api_key",
}
project_id = "insert_your_project_id"
resp = WatsonX(credentials=credentials, project_id=project_id).complete(
"Paul Graham is"
)
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.watsonx import WatsonX
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="Tell me a story"),
]
resp = WatsonX(
model_id="meta-llama/llama-2-70b-chat",
credentials=credentials,
project_id=project_id,
).chat(messages)
print(resp)
from llama_index.llms.watsonx import WatsonX
llm = WatsonX(credentials=credentials, project_id=project_id)
resp = llm.stream_complete("Paul Graham is")
for r in resp:
print(r.delta, end="")
from llama_index.llms.watsonx import WatsonX
llm = WatsonX(
model_id="meta-llama/llama-2-70b-chat",
credentials=credentials,
project_id=project_id,
)
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
| ChatMessage(role="user", content="Tell me a story") | llama_index.core.llms.ChatMessage |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system("mkdir -p 'data/'")
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.core import Document, VectorStoreIndex
from llama_index.readers.file import PyMuPDFReader
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.llms.openai import OpenAI
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [ | Document(text=doc_text) | llama_index.core.Document |
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix
from typing import Iterable
from random import randrange
LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = f"llama-index-colab"
SESSION_CORPUS_ID_PREFIX = (
f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}"
)
def corpus_id(num_id: int) -> str:
return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}"
SESSION_CORPUS_ID = corpus_id(1)
def list_corpora() -> Iterable[genaix.Corpus]:
client = genaix.build_semantic_retriever()
yield from genaix.list_corpora(client=client)
def delete_corpus(*, corpus_id: str) -> None:
client = genaix.build_semantic_retriever()
genaix.delete_corpus(corpus_id=corpus_id, client=client)
def cleanup_colab_corpora():
for corpus in list_corpora():
if corpus.corpus_id.startswith(LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX):
try:
delete_corpus(corpus_id=corpus.corpus_id)
print(f"Deleted corpus {corpus.corpus_id}.")
except Exception:
pass
cleanup_colab_corpora()
from llama_index.core import SimpleDirectoryReader
from llama_index.indices.managed.google import GoogleIndex
from llama_index.core import Response
import time
index = GoogleIndex.create_corpus(
corpus_id=SESSION_CORPUS_ID, display_name="My first corpus!"
)
print(f"Newly created corpus ID is {index.corpus_id}.")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index.insert_documents(documents)
for corpus in list_corpora():
print(corpus)
query_engine = index.as_query_engine()
response = query_engine.query("What did Paul Graham do growing up?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
for cited_text in [node.text for node in response.source_nodes]:
print(f"Cited text: {cited_text}")
if response.metadata:
print(
f"Answerability: {response.metadata.get('answerable_probability', 0)}"
)
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
query_engine = index.as_query_engine()
response = query_engine.query("Which company did Paul Graham build?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
index.insert_nodes(
[
TextNode(
text="It was the best of times.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="123",
metadata={"file_name": "Tale of Two Cities"},
)
},
),
TextNode(
text="It was the worst of times.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="123",
metadata={"file_name": "Tale of Two Cities"},
)
},
),
TextNode(
text="Bugs Bunny: Wassup doc?",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="456",
metadata={"file_name": "Bugs Bunny Adventure"},
)
},
),
]
)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
HarmCategory,
SafetySetting,
)
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
query_engine = index.as_query_engine(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
SafetySetting(
category=HarmCategory.HARM_CATEGORY_VIOLENCE,
threshold=SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH,
),
],
)
response = query_engine.query("What was Bugs Bunny's favorite saying?")
print(response)
from llama_index.core import Response
response = query_engine.query("What were Paul Graham's achievements?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
for cited_text in [node.text for node in response.source_nodes]:
print(f"Cited text: {cited_text}")
if response.metadata:
print(
f"Answerability: {response.metadata.get('answerable_probability', 0)}"
)
from llama_index.llms.gemini import Gemini
GEMINI_API_KEY = "" # @param {type:"string"}
gemini = Gemini(api_key=GEMINI_API_KEY)
from llama_index.response_synthesizers.google import GoogleTextSynthesizer
from llama_index.vector_stores.google import GoogleVectorStore
from llama_index.core import VectorStoreIndex
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
index = VectorStoreIndex.from_vector_store(
vector_store=store,
)
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
reranker = LLMRerank(
top_n=10,
llm=gemini,
)
query_engine = RetrieverQueryEngine.from_args(
retriever=VectorIndexRetriever(
index=index,
similarity_top_k=20,
),
node_postprocessors=[reranker],
response_synthesizer=response_synthesizer,
)
response = query_engine.query("What were Paul Graham's achievements?")
print(response)
from llama_index.core.indices.query.query_transform.base import (
StepDecomposeQueryTransform,
)
from llama_index.core.query_engine import MultiStepQueryEngine
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
index = VectorStoreIndex.from_vector_store(
vector_store=store,
)
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
single_step_query_engine = index.as_query_engine(
similarity_top_k=10,
response_synthesizer=response_synthesizer,
)
step_decompose_transform = StepDecomposeQueryTransform(
llm=gemini,
verbose=True,
)
query_engine = MultiStepQueryEngine(
query_engine=single_step_query_engine,
query_transform=step_decompose_transform,
response_synthesizer=response_synthesizer,
index_summary="Ask me anything.",
num_steps=6,
)
response = query_engine.query("What were Paul Graham's achievements?")
print(response)
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.core.query_engine import TransformQueryEngine
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
index = VectorStoreIndex.from_vector_store(
vector_store=store,
)
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
base_query_engine = index.as_query_engine(
similarity_top_k=10,
response_synthesizer=response_synthesizer,
)
hyde = HyDEQueryTransform(
llm=gemini,
include_original=False,
)
hyde_query_engine = TransformQueryEngine(base_query_engine, hyde)
response = query_engine.query("What were Paul Graham's achievements?")
print(response)
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
index = VectorStoreIndex.from_vector_store(
vector_store=store,
)
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.2, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
)
reranker = LLMRerank(
top_n=10,
llm=gemini,
)
single_step_query_engine = index.as_query_engine(
similarity_top_k=20,
node_postprocessors=[reranker],
response_synthesizer=response_synthesizer,
)
hyde = HyDEQueryTransform(
llm=gemini,
include_original=False,
)
hyde_query_engine = | TransformQueryEngine(single_step_query_engine, hyde) | llama_index.core.query_engine.TransformQueryEngine |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader("../data/paul_graham")
docs = reader.load_data()
import os
from llama_index.core import (
StorageContext,
VectorStoreIndex,
load_index_from_storage,
)
if not os.path.exists("storage"):
index = VectorStoreIndex.from_documents(docs)
index.set_index_id("vector_index")
index.storage_context.persist("./storage")
else:
storage_context = StorageContext.from_defaults(persist_dir="storage")
index = load_index_from_storage(storage_context, index_id="vector_index")
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core import PromptTemplate
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
p = QueryPipeline(chain=[prompt_tmpl, llm], verbose=True)
output = p.run(movie_name="The Departed")
print(str(output))
from typing import List
from pydantic import BaseModel, Field
from llama_index.core.output_parsers import PydanticOutputParser
class Movie(BaseModel):
"""Object representing a single movie."""
name: str = Field(..., description="Name of the movie.")
year: int = Field(..., description="Year of the movie.")
class Movies(BaseModel):
"""Object representing a list of movies."""
movies: List[Movie] = Field(..., description="List of movies.")
llm = OpenAI(model="gpt-3.5-turbo")
output_parser = PydanticOutputParser(Movies)
json_prompt_str = """\
Please generate related movies to {movie_name}. Output with the following JSON format:
"""
json_prompt_str = output_parser.format(json_prompt_str)
json_prompt_tmpl = PromptTemplate(json_prompt_str)
p = QueryPipeline(chain=[json_prompt_tmpl, llm, output_parser], verbose=True)
output = p.run(movie_name="Toy Story")
output
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
prompt_str2 = """\
Here's some text:
{text}
Can you rewrite this with a summary of each movie?
"""
prompt_tmpl2 = PromptTemplate(prompt_str2)
llm = OpenAI(model="gpt-3.5-turbo")
llm_c = llm.as_query_component(streaming=True)
p = QueryPipeline(
chain=[prompt_tmpl, llm_c, prompt_tmpl2, llm_c], verbose=True
)
output = p.run(movie_name="The Dark Knight")
for o in output:
print(o.delta, end="")
p = QueryPipeline(
chain=[
json_prompt_tmpl,
llm.as_query_component(streaming=True),
output_parser,
],
verbose=True,
)
output = p.run(movie_name="Toy Story")
print(output)
from llama_index.postprocessor.cohere_rerank import CohereRerank
prompt_str1 = "Please generate a concise question about Paul Graham's life regarding the following topic {topic}"
prompt_tmpl1 = PromptTemplate(prompt_str1)
prompt_str2 = (
"Please write a passage to answer the question\n"
"Try to include as many key details as possible.\n"
"\n"
"\n"
"{query_str}\n"
"\n"
"\n"
'Passage:"""\n'
)
prompt_tmpl2 = PromptTemplate(prompt_str2)
llm = OpenAI(model="gpt-3.5-turbo")
retriever = index.as_retriever(similarity_top_k=5)
p = QueryPipeline(
chain=[prompt_tmpl1, llm, prompt_tmpl2, llm, retriever], verbose=True
)
nodes = p.run(topic="college")
len(nodes)
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.response_synthesizers import TreeSummarize
prompt_str = "Please generate a question about Paul Graham's life regarding the following topic {topic}"
prompt_tmpl = PromptTemplate(prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
retriever = index.as_retriever(similarity_top_k=3)
reranker = CohereRerank()
summarizer = TreeSummarize(llm=llm)
p = QueryPipeline(verbose=True)
p.add_modules(
{
"llm": llm,
"prompt_tmpl": prompt_tmpl,
"retriever": retriever,
"summarizer": summarizer,
"reranker": reranker,
}
)
p.add_link("prompt_tmpl", "llm")
p.add_link("llm", "retriever")
p.add_link("retriever", "reranker", dest_key="nodes")
p.add_link("llm", "reranker", dest_key="query_str")
p.add_link("reranker", "summarizer", dest_key="nodes")
p.add_link("llm", "summarizer", dest_key="query_str")
print(summarizer.as_query_component().input_keys)
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(p.dag)
net.show("rag_dag.html")
response = p.run(topic="YC")
print(str(response))
response = await p.arun(topic="YC")
print(str(response))
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.query_pipeline import InputComponent
retriever = index.as_retriever(similarity_top_k=5)
summarizer = TreeSummarize(llm=OpenAI(model="gpt-3.5-turbo"))
reranker = CohereRerank()
p = QueryPipeline(verbose=True)
p.add_modules(
{
"input": InputComponent(),
"retriever": retriever,
"summarizer": summarizer,
}
)
p.add_link("input", "retriever")
p.add_link("input", "summarizer", dest_key="query_str")
p.add_link("retriever", "summarizer", dest_key="nodes")
output = p.run(input="what did the author do in YC")
print(str(output))
from llama_index.core.query_pipeline import (
CustomQueryComponent,
InputKeys,
OutputKeys,
)
from typing import Dict, Any
from llama_index.core.llms.llm import LLM
from pydantic import Field
class RelatedMovieComponent(CustomQueryComponent):
"""Related movie component."""
llm: LLM = Field(..., description="OpenAI LLM")
def _validate_component_inputs(
self, input: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
return input
@property
def _input_keys(self) -> set:
"""Input keys dict."""
return {"movie"}
@property
def _output_keys(self) -> set:
return {"output"}
def _run_component(self, **kwargs) -> Dict[str, Any]:
"""Run the component."""
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
p = QueryPipeline(chain=[prompt_tmpl, llm])
return {"output": p.run(movie_name=kwargs["movie"])}
llm = OpenAI(model="gpt-3.5-turbo")
component = RelatedMovieComponent(llm=llm)
prompt_str = """\
Here's some text:
{text}
Can you rewrite this in the voice of Shakespeare?
"""
prompt_tmpl = PromptTemplate(prompt_str)
p = | QueryPipeline(chain=[component, prompt_tmpl, llm], verbose=True) | llama_index.core.query_pipeline.QueryPipeline |
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///:memory:", future=True)
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
metadata_obj.tables.keys()
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{"city_name": "Berlin", "population": 3645000, "country": "Germany"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
with engine.connect() as connection:
cursor = connection.exec_driver_sql("SELECT * FROM city_stats")
print(cursor.fetchall())
get_ipython().system('pip install wikipedia')
from llama_index.readers.wikipedia import WikipediaReader
cities = ["Toronto", "Berlin", "Tokyo"]
wiki_docs = WikipediaReader().load_data(pages=cities)
from llama_index.core import SQLDatabase
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex
vector_indices = {}
vector_query_engines = {}
for city, wiki_doc in zip(cities, wiki_docs):
vector_index = | VectorStoreIndex.from_documents([wiki_doc]) | llama_index.core.VectorStoreIndex.from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix
from typing import Iterable
from random import randrange
LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = f"llama-index-colab"
SESSION_CORPUS_ID_PREFIX = (
f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}"
)
def corpus_id(num_id: int) -> str:
return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}"
SESSION_CORPUS_ID = corpus_id(1)
def list_corpora() -> Iterable[genaix.Corpus]:
client = genaix.build_semantic_retriever()
yield from genaix.list_corpora(client=client)
def delete_corpus(*, corpus_id: str) -> None:
client = genaix.build_semantic_retriever()
genaix.delete_corpus(corpus_id=corpus_id, client=client)
def cleanup_colab_corpora():
for corpus in list_corpora():
if corpus.corpus_id.startswith(LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX):
try:
delete_corpus(corpus_id=corpus.corpus_id)
print(f"Deleted corpus {corpus.corpus_id}.")
except Exception:
pass
cleanup_colab_corpora()
from llama_index.core import SimpleDirectoryReader
from llama_index.indices.managed.google import GoogleIndex
from llama_index.core import Response
import time
index = GoogleIndex.create_corpus(
corpus_id=SESSION_CORPUS_ID, display_name="My first corpus!"
)
print(f"Newly created corpus ID is {index.corpus_id}.")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index.insert_documents(documents)
for corpus in list_corpora():
print(corpus)
query_engine = index.as_query_engine()
response = query_engine.query("What did Paul Graham do growing up?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
for cited_text in [node.text for node in response.source_nodes]:
print(f"Cited text: {cited_text}")
if response.metadata:
print(
f"Answerability: {response.metadata.get('answerable_probability', 0)}"
)
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
query_engine = index.as_query_engine()
response = query_engine.query("Which company did Paul Graham build?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
index.insert_nodes(
[
TextNode(
text="It was the best of times.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="123",
metadata={"file_name": "Tale of Two Cities"},
)
},
),
TextNode(
text="It was the worst of times.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="123",
metadata={"file_name": "Tale of Two Cities"},
)
},
),
TextNode(
text="Bugs Bunny: Wassup doc?",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="456",
metadata={"file_name": "Bugs Bunny Adventure"},
)
},
),
]
)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
HarmCategory,
SafetySetting,
)
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
query_engine = index.as_query_engine(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
SafetySetting(
category=HarmCategory.HARM_CATEGORY_VIOLENCE,
threshold=SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH,
),
],
)
response = query_engine.query("What was Bugs Bunny's favorite saying?")
print(response)
from llama_index.core import Response
response = query_engine.query("What were Paul Graham's achievements?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
for cited_text in [node.text for node in response.source_nodes]:
print(f"Cited text: {cited_text}")
if response.metadata:
print(
f"Answerability: {response.metadata.get('answerable_probability', 0)}"
)
from llama_index.llms.gemini import Gemini
GEMINI_API_KEY = "" # @param {type:"string"}
gemini = Gemini(api_key=GEMINI_API_KEY)
from llama_index.response_synthesizers.google import GoogleTextSynthesizer
from llama_index.vector_stores.google import GoogleVectorStore
from llama_index.core import VectorStoreIndex
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
index = VectorStoreIndex.from_vector_store(
vector_store=store,
)
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
reranker = LLMRerank(
top_n=10,
llm=gemini,
)
query_engine = RetrieverQueryEngine.from_args(
retriever=VectorIndexRetriever(
index=index,
similarity_top_k=20,
),
node_postprocessors=[reranker],
response_synthesizer=response_synthesizer,
)
response = query_engine.query("What were Paul Graham's achievements?")
print(response)
from llama_index.core.indices.query.query_transform.base import (
StepDecomposeQueryTransform,
)
from llama_index.core.query_engine import MultiStepQueryEngine
store = | GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID) | llama_index.vector_stores.google.GoogleVectorStore.from_corpus |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core import Settings
from llama_index.llms.openai import OpenAI
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = | SimpleDirectoryReader("./data/paul_graham") | llama_index.core.SimpleDirectoryReader |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-kvstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
from llama_index.storage.kvstore.firestore import FirestoreKVStore
from llama_index.storage.docstore.firestore import FirestoreDocumentStore
from llama_index.storage.index_store.firestore import FirestoreIndexStore
kvstore = FirestoreKVStore()
storage_context = StorageContext.from_defaults(
docstore=FirestoreDocumentStore(kvstore),
index_store=FirestoreIndexStore(kvstore),
)
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
nodes, storage_context=storage_context
)
len(storage_context.docstore.docs)
storage_context.persist()
list_id = summary_index.index_id
vector_id = vector_index.index_id
keyword_id = keyword_table_index.index_id
from llama_index.core import load_index_from_storage
kvstore = | FirestoreKVStore() | llama_index.storage.kvstore.firestore.FirestoreKVStore |
get_ipython().run_line_magic('pip', 'install llama-index-readers-mongodb')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
get_ipython().system('pip install llama-index pymongo')
from llama_index.core import SummaryIndex
from llama_index.readers.mongodb import SimpleMongoReader
from IPython.display import Markdown, display
import os
host = "<host>"
port = "<port>"
db_name = "<db_name>"
collection_name = "<collection_name>"
query_dict = {}
field_names = ["text"]
reader = SimpleMongoReader(host, port)
documents = reader.load_data(
db_name, collection_name, field_names, query_dict=query_dict
)
index = | SummaryIndex.from_documents(documents) | llama_index.core.SummaryIndex.from_documents |
get_ipython().system('pip install llama-index yfinance')
import openai
from llama_index.agent import OpenAIAgent
openai.api_key = "sk-..."
from llama_index.tools.yahoo_finance.base import YahooFinanceToolSpec
finance_tool = | YahooFinanceToolSpec() | llama_index.tools.yahoo_finance.base.YahooFinanceToolSpec |
from llama_hub.semanticscholar.base import SemanticScholarReader
import os
import openai
from llama_index.llms import OpenAI
from llama_index.query_engine import CitationQueryEngine
from llama_index import (
VectorStoreIndex,
StorageContext,
load_index_from_storage,
ServiceContext,
)
from llama_index.response.notebook_utils import display_response
s2reader = SemanticScholarReader()
openai.api_key = os.environ["OPENAI_API_KEY"]
service_context = ServiceContext.from_defaults(
llm= | OpenAI(model="gpt-3.5-turbo", temperature=0) | llama_index.llms.OpenAI |
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools.wolfram_alpha.base import WolframAlphaToolSpec
wolfram_spec = WolframAlphaToolSpec(app_id="your-key")
tools = wolfram_spec.to_tool_list()
agent = | OpenAIAgent.from_tools(tools, verbose=True) | llama_index.agent.OpenAIAgent.from_tools |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
get_ipython().system('pip install llama_hub')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
docs0 = PyMuPDFReader().load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
index = VectorStoreIndex(base_nodes)
query_engine = index.as_query_engine(similarity_top_k=2)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/fh9vsmmm8vu0j50l3ss38/llama2_eval_qr_dataset.json?rlkey=kkoaez7aqeb4z25gzc06ak6kb&dl=1" -O data/llama2_eval_qr_dataset.json')
from llama_index.core.evaluation import QueryResponseDataset
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
from llama_index.core.evaluation.eval_utils import get_responses
from llama_index.core.evaluation import CorrectnessEvaluator, BatchEvalRunner
evaluator_c = | CorrectnessEvaluator() | llama_index.core.evaluation.CorrectnessEvaluator |
get_ipython().system('pip install llama-index-multi-modal-llms-anthropic')
get_ipython().system('pip install llama-index-vector-stores-qdrant')
get_ipython().system('pip install matplotlib')
import os
os.environ["ANTHROPIC_API_KEY"] = "" # Your ANTHROPIC API key here
from PIL import Image
import matplotlib.pyplot as plt
img = Image.open("../data/images/prometheus_paper_card.png")
plt.imshow(img)
from llama_index.core import SimpleDirectoryReader
from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal
image_documents = SimpleDirectoryReader(
input_files=["../data/images/prometheus_paper_card.png"]
).load_data()
anthropic_mm_llm = AnthropicMultiModal(max_tokens=300)
response = anthropic_mm_llm.complete(
prompt="Describe the images as an alternative text",
image_documents=image_documents,
)
print(response)
from PIL import Image
import requests
from io import BytesIO
import matplotlib.pyplot as plt
from llama_index.core.multi_modal_llms.generic_utils import load_image_urls
image_urls = [
"https://venturebeat.com/wp-content/uploads/2024/03/Screenshot-2024-03-04-at-12.49.41%E2%80%AFAM.png",
]
img_response = requests.get(image_urls[0])
img = Image.open(BytesIO(img_response.content))
plt.imshow(img)
image_url_documents = load_image_urls(image_urls)
response = anthropic_mm_llm.complete(
prompt="Describe the images as an alternative text",
image_documents=image_url_documents,
)
print(response)
from llama_index.core import SimpleDirectoryReader
image_documents = SimpleDirectoryReader(
input_files=["../data/images/ark_email_sample.PNG"]
).load_data()
from PIL import Image
import matplotlib.pyplot as plt
img = Image.open("../data/images/ark_email_sample.PNG")
plt.imshow(img)
from pydantic import BaseModel
from typing import List
class TickerInfo(BaseModel):
"""List of ticker info."""
direction: str
ticker: str
company: str
shares_traded: int
percent_of_total_etf: float
class TickerList(BaseModel):
"""List of stock tickers."""
fund: str
tickers: List[TickerInfo]
from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal
from llama_index.core.program import MultiModalLLMCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
prompt_template_str = """\
Can you get the stock information in the image \
and return the answer? Pick just one fund.
Make sure the answer is a JSON format corresponding to a Pydantic schema. The Pydantic schema is given below.
"""
anthropic_mm_llm = AnthropicMultiModal(max_tokens=300)
llm_program = MultiModalLLMCompletionProgram.from_defaults(
output_cls=TickerList,
image_documents=image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=anthropic_mm_llm,
verbose=True,
)
response = llm_program()
print(str(response))
get_ipython().system('wget "https://www.dropbox.com/scl/fi/c1ec6osn0r2ggnitijqhl/mixed_wiki_images_small.zip?rlkey=swwxc7h4qtwlnhmby5fsnderd&dl=1" -O mixed_wiki_images_small.zip')
get_ipython().system('unzip mixed_wiki_images_small.zip')
from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal
anthropic_mm_llm = AnthropicMultiModal(max_tokens=300)
from llama_index.core.schema import TextNode
from pathlib import Path
from llama_index.core import SimpleDirectoryReader
nodes = []
for img_file in Path("mixed_wiki_images_small").glob("*.png"):
print(img_file)
image_documents = SimpleDirectoryReader(input_files=[img_file]).load_data()
response = anthropic_mm_llm.complete(
prompt="Describe the images as an alternative text",
image_documents=image_documents,
)
metadata = {"img_file": img_file}
nodes.append(TextNode(text=str(response), metadata=metadata))
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.anthropic import Anthropic
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import Settings
from llama_index.core import StorageContext
import qdrant_client
client = qdrant_client.QdrantClient(path="qdrant_mixed_img")
vector_store = QdrantVectorStore(client=client, collection_name="collection")
embed_model = OpenAIEmbedding()
anthropic_mm_llm = | AnthropicMultiModal(max_tokens=300) | llama_index.multi_modal_llms.anthropic.AnthropicMultiModal |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.llms.openai import OpenAI
resp = OpenAI().complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.openai import OpenAI
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = OpenAI().chat(messages)
print(resp)
from llama_index.llms.openai import OpenAI
llm = OpenAI()
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
llm = OpenAI()
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = llm.stream_chat(messages)
for r in resp:
print(r.delta, end="")
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="text-davinci-003")
resp = llm.complete("Paul Graham is ")
print(resp)
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = llm.chat(messages)
print(resp)
from pydantic import BaseModel
from llama_index.core.llms.openai_utils import to_openai_tool
class Song(BaseModel):
"""A song with name and artist"""
name: str
artist: str
song_fn = to_openai_tool(Song)
from llama_index.llms.openai import OpenAI
response = | OpenAI() | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix
from typing import Iterable
from random import randrange
LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = f"llama-index-colab"
SESSION_CORPUS_ID_PREFIX = (
f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}"
)
def corpus_id(num_id: int) -> str:
return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}"
SESSION_CORPUS_ID = corpus_id(1)
def list_corpora() -> Iterable[genaix.Corpus]:
client = genaix.build_semantic_retriever()
yield from genaix.list_corpora(client=client)
def delete_corpus(*, corpus_id: str) -> None:
client = | genaix.build_semantic_retriever() | llama_index.core.vector_stores.google.generativeai.genai_extension.build_semantic_retriever |
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-program-openai')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.program.openai import OpenAIPydanticProgram
from pydantic import BaseModel
from llama_index.llms.openai import OpenAI
from llama_index.finetuning.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from typing import List
class Song(BaseModel):
"""Data model for a song."""
title: str
length_seconds: int
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
songs: List[Song]
finetuning_handler = OpenAIFineTuningHandler()
callback_manager = CallbackManager([finetuning_handler])
llm = OpenAI(model="gpt-4", callback_manager=callback_manager)
prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
"""
program = OpenAIPydanticProgram.from_defaults(
output_cls=Album,
prompt_template_str=prompt_template_str,
llm=llm,
verbose=False,
)
movie_names = [
"The Shining",
"The Departed",
"Titanic",
"Goodfellas",
"Pretty Woman",
"Home Alone",
"Caged Fury",
"Edward Scissorhands",
"Total Recall",
"Ghost",
"Tremors",
"RoboCop",
"Rocky V",
]
from tqdm.notebook import tqdm
for movie_name in tqdm(movie_names):
output = program(movie_name=movie_name)
print(output.json())
finetuning_handler.save_finetuning_events("mock_finetune_songs.jsonl")
get_ipython().system('cat mock_finetune_songs.jsonl')
from llama_index.finetuning import OpenAIFinetuneEngine
finetune_engine = OpenAIFinetuneEngine(
"gpt-3.5-turbo",
"mock_finetune_songs.jsonl",
validate_json=False, # openai validate json code doesn't support function calling yet
)
finetune_engine.finetune()
finetune_engine.get_current_job()
ft_llm = finetune_engine.get_finetuned_model(temperature=0.3)
ft_program = OpenAIPydanticProgram.from_defaults(
output_cls=Album,
prompt_template_str=prompt_template_str,
llm=ft_llm,
verbose=False,
)
ft_program(movie_name="Goodfellas")
get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pydantic import Field
from typing import List
class Citation(BaseModel):
"""Citation class."""
author: str = Field(
..., description="Inferred first author (usually last name"
)
year: int = Field(..., description="Inferred year")
desc: str = Field(
...,
description=(
"Inferred description from the text of the work that the author is"
" cited for"
),
)
class Response(BaseModel):
"""List of author citations.
Extracted over unstructured text.
"""
citations: List[Citation] = Field(
...,
description=(
"List of author citations (organized by author, year, and"
" description)."
),
)
from llama_index.readers.file import PyMuPDFReader
from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter
from pathlib import Path
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
metadata = {
"paper_title": "Llama 2: Open Foundation and Fine-Tuned Chat Models"
}
docs = [Document(text=doc_text, metadata=metadata)]
chunk_size = 1024
node_parser = | SentenceSplitter(chunk_size=chunk_size) | llama_index.core.node_parser.SentenceSplitter |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
import nest_asyncio
nest_asyncio.apply()
from IPython.display import HTML, display
def set_css():
display(
HTML(
"""
<style>
pre {
white-space: pre-wrap;
}
</style>
"""
)
)
get_ipython().events.register("pre_run_cell", set_css)
get_ipython().system('mkdir data')
get_ipython().system('wget "https://www.dropbox.com/s/948jr9cfs7fgj99/UBER.zip?dl=1" -O data/UBER.zip')
get_ipython().system('unzip data/UBER.zip -d data')
from llama_index.readers.file import UnstructuredReader
from pathlib import Path
years = [2022, 2021, 2020, 2019]
loader = UnstructuredReader()
doc_set = {}
all_docs = []
for year in years:
year_docs = loader.load_data(
file=Path(f"./data/UBER/UBER_{year}.html"), split_documents=False
)
for d in year_docs:
d.metadata = {"year": year}
doc_set[year] = year_docs
all_docs.extend(year_docs)
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.chunk_size = 512
Settings.chunk_overlap = 64
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
index_set = {}
for year in years:
storage_context = | StorageContext.from_defaults() | llama_index.core.StorageContext.from_defaults |
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
from llama_index.llms.anthropic import Anthropic
llm = | OpenAI(model="gpt-4") | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic')
get_ipython().system('pip install llama-index')
from llama_index.llms.anthropic import Anthropic
from llama_index.core import Settings
tokenizer = Anthropic().tokenizer
Settings.tokenizer = tokenizer
import os
os.environ["ANTHROPIC_API_KEY"] = "YOUR ANTHROPIC API KEY"
from llama_index.llms.anthropic import Anthropic
llm = Anthropic(model="claude-3-opus-20240229")
resp = llm.complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.anthropic import Anthropic
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="Tell me a story"),
]
resp = Anthropic(model="claude-3-opus-20240229").chat(messages)
print(resp)
from llama_index.llms.anthropic import Anthropic
llm = | Anthropic(model="claude-3-opus-20240229", max_tokens=100) | llama_index.llms.anthropic.Anthropic |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core import (
VectorStoreIndex,
SimpleKeywordTableIndex,
SimpleDirectoryReader,
)
from llama_index.core import SummaryIndex
from llama_index.core.schema import IndexNode
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.callbacks import CallbackManager
from llama_index.llms.openai import OpenAI
wiki_titles = [
"Toronto",
"Seattle",
"Chicago",
"Boston",
"Houston",
]
from pathlib import Path
import requests
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
data_path = Path("data")
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
city_docs = {}
for wiki_title in wiki_titles:
city_docs[wiki_title] = SimpleDirectoryReader(
input_files=[f"data/{wiki_title}.txt"]
).load_data()
llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
callback_manager = | CallbackManager([]) | llama_index.core.callbacks.CallbackManager |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import openai
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo")
data = SimpleDirectoryReader(input_dir="./data/paul_graham/").load_data()
index = | VectorStoreIndex.from_documents(data) | llama_index.core.VectorStoreIndex.from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().system('pip install llama-index')
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from llama_index.core.tools import QueryEngineTool, ToolMetadata
try:
storage_context = StorageContext.from_defaults(
persist_dir="./storage/lyft"
)
lyft_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/uber"
)
uber_index = load_index_from_storage(storage_context)
index_loaded = True
except:
index_loaded = False
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'")
if not index_loaded:
lyft_docs = SimpleDirectoryReader(
input_files=["./data/10k/lyft_2021.pdf"]
).load_data()
uber_docs = SimpleDirectoryReader(
input_files=["./data/10k/uber_2021.pdf"]
).load_data()
lyft_index = | VectorStoreIndex.from_documents(lyft_docs) | llama_index.core.VectorStoreIndex.from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
MONGO_URI = os.environ["MONGO_URI"]
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.storage.index_store.mongodb import MongoIndexStore
storage_context = StorageContext.from_defaults(
docstore=MongoDocumentStore.from_uri(uri=MONGO_URI),
index_store=MongoIndexStore.from_uri(uri=MONGO_URI),
)
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
nodes, storage_context=storage_context
)
len(storage_context.docstore.docs)
storage_context.persist()
list_id = summary_index.index_id
vector_id = vector_index.index_id
keyword_id = keyword_table_index.index_id
from llama_index.core import load_index_from_storage
storage_context = StorageContext.from_defaults(
docstore=MongoDocumentStore.from_uri(uri=MONGO_URI),
index_store= | MongoIndexStore.from_uri(uri=MONGO_URI) | llama_index.storage.index_store.mongodb.MongoIndexStore.from_uri |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install llama-index-tools-google')
from llama_index.readers.web import SimpleWebPageReader
reader = SimpleWebPageReader(html_to_text=True)
docs = reader.load_data(urls=["https://eugeneyan.com/writing/llm-patterns/"])
print(docs[0].get_content()[:400])
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(docs)
query_engine = index.as_query_engine()
response = query_engine.query("What are ways to evaluate LLMs?")
print(str(response))
from llama_index.tools.google import GmailToolSpec
tool_spec = | GmailToolSpec() | llama_index.tools.google.GmailToolSpec |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-elasticsearch')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import os
import getpass
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.elasticsearch import ElasticsearchStore
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text=(
"A bunch of scientists bring back dinosaurs and mayhem breaks"
" loose"
),
metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"},
),
TextNode(
text=(
"Leo DiCaprio gets lost in a dream within a dream within a dream"
" within a ..."
),
metadata={
"year": 2010,
"director": "Christopher Nolan",
"rating": 8.2,
},
),
TextNode(
text=(
"A psychologist / detective gets lost in a series of dreams within"
" dreams within dreams and Inception reused the idea"
),
metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6},
),
| TextNode(
text=(
"A bunch of normal-sized women are supremely wholesome and some"
" men pine after them"
) | llama_index.core.schema.TextNode |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-deeplake')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import nest_asyncio
import os
import getpass
nest_asyncio.apply()
get_ipython().system('pip install deeplake beautifulsoup4 html2text tiktoken openai llama-index python-dotenv')
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
def get_all_links(url):
response = requests.get(url)
if response.status_code != 200:
print(f"Failed to retrieve the page: {url}")
return []
soup = BeautifulSoup(response.content, "html.parser")
links = [
urljoin(url, a["href"])
for a in soup.find_all("a", href=True)
if a["href"]
]
return links
from langchain.document_loaders import AsyncHtmlLoader
from langchain.document_transformers import Html2TextTransformer
from llama_index.core import Document
def load_documents(url):
all_links = get_all_links(url)
loader = AsyncHtmlLoader(all_links)
docs = loader.load()
html2text = Html2TextTransformer()
docs_transformed = html2text.transform_documents(docs)
docs = [Document.from_langchain_format(doc) for doc in docs_transformed]
return docs
docs = load_documents("https://docs.deeplake.ai/en/latest/")
len(docs)
from llama_index.core.evaluation import generate_question_context_pairs
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.vector_stores.deeplake import DeepLakeVectorStore
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.llms.openai import OpenAI
os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API token: ")
os.environ["ACTIVELOOP_TOKEN"] = getpass.getpass(
"Enter your ActiveLoop API token: "
) # Get your API token from https://app.activeloop.ai, click on your profile picture in the top right corner, and select "API Tokens"
token = os.getenv("ACTIVELOOP_TOKEN")
vector_store = DeepLakeVectorStore(
dataset_path="hub://activeloop-test/deeplake_docs_deepmemory2",
overwrite=False, # set to True to overwrite the existing dataset
runtime={"tensor_db": True},
token=token,
)
def create_modules(vector_store, docs=[], populate_vector_store=True):
if populate_vector_store:
node_parser = SimpleNodeParser.from_defaults(chunk_size=512)
nodes = node_parser.get_nodes_from_documents(docs)
else:
nodes = []
for idx, node in enumerate(nodes):
node.id_ = f"node_{idx}"
llm = | OpenAI(model="gpt-4") | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
get_ipython().system('pip install llama_hub')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
docs0 = | PyMuPDFReader() | llama_index.readers.file.PyMuPDFReader |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
from llama_index.core.node_parser import SentenceSplitter
node_parser = SentenceSplitter(chunk_size=256)
nodes = node_parser.get_nodes_from_documents(documents)
from llama_index.embeddings.openai import OpenAIEmbedding
embed_model = OpenAIEmbedding()
for node in nodes:
node_embedding = embed_model.get_text_embedding(
node.get_content(metadata_mode="all")
)
node.embedding = node_embedding
from llama_index.core.vector_stores.types import VectorStore
from llama_index.core.vector_stores import (
VectorStoreQuery,
VectorStoreQueryResult,
)
from typing import List, Any, Optional, Dict
from llama_index.core.schema import TextNode, BaseNode
import os
class BaseVectorStore(VectorStore):
"""Simple custom Vector Store.
Stores documents in a simple in-memory dict.
"""
stores_text: bool = True
def get(self, text_id: str) -> List[float]:
"""Get embedding."""
pass
def add(
self,
nodes: List[BaseNode],
) -> List[str]:
"""Add nodes to index."""
pass
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
pass
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Get nodes for response."""
pass
def persist(self, persist_path, fs=None) -> None:
"""Persist the SimpleVectorStore to a directory.
NOTE: we are not implementing this for now.
"""
pass
from dataclasses import fields
{f.name: f.type for f in fields(VectorStoreQuery)}
{f.name: f.type for f in fields(VectorStoreQueryResult)}
class VectorStore2(BaseVectorStore):
"""VectorStore2 (add/get/delete implemented)."""
stores_text: bool = True
def __init__(self) -> None:
"""Init params."""
self.node_dict: Dict[str, BaseNode] = {}
def get(self, text_id: str) -> List[float]:
"""Get embedding."""
return self.node_dict[text_id]
def add(
self,
nodes: List[BaseNode],
) -> List[str]:
"""Add nodes to index."""
for node in nodes:
self.node_dict[node.node_id] = node
def delete(self, node_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with node_id.
Args:
node_id: str
"""
del self.node_dict[node_id]
test_node = TextNode(id_="id1", text="hello world")
test_node2 = TextNode(id_="id2", text="foo bar")
test_nodes = [test_node, test_node2]
vector_store = VectorStore2()
vector_store.add(test_nodes)
node = vector_store.get("id1")
print(str(node))
from typing import Tuple
import numpy as np
def get_top_k_embeddings(
query_embedding: List[float],
doc_embeddings: List[List[float]],
doc_ids: List[str],
similarity_top_k: int = 5,
) -> Tuple[List[float], List]:
"""Get top nodes by similarity to the query."""
qembed_np = np.array(query_embedding)
dembed_np = np.array(doc_embeddings)
dproduct_arr = np.dot(dembed_np, qembed_np)
norm_arr = np.linalg.norm(qembed_np) * np.linalg.norm(
dembed_np, axis=1, keepdims=False
)
cos_sim_arr = dproduct_arr / norm_arr
tups = [(cos_sim_arr[i], doc_ids[i]) for i in range(len(doc_ids))]
sorted_tups = sorted(tups, key=lambda t: t[0], reverse=True)
sorted_tups = sorted_tups[:similarity_top_k]
result_similarities = [s for s, _ in sorted_tups]
result_ids = [n for _, n in sorted_tups]
return result_similarities, result_ids
class VectorStore3A(VectorStore2):
"""Implements semantic/dense search."""
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Get nodes for response."""
query_embedding = cast(List[float], query.query_embedding)
doc_embeddings = [n.embedding for n in self.node_dict.values()]
doc_ids = [n.node_id for n in self.node_dict.values()]
similarities, node_ids = get_top_k_embeddings(
query_embedding,
embeddings,
doc_ids,
similarity_top_k=query.similarity_top_k,
)
result_nodes = [self.node_dict[node_id] for node_id in node_ids]
return VectorStoreQueryResult(
nodes=result_nodes, similarities=similarities, ids=node_ids
)
from llama_index.core.vector_stores import MetadataFilters
from llama_index.core.schema import BaseNode
from typing import cast
def filter_nodes(nodes: List[BaseNode], filters: MetadataFilters):
filtered_nodes = []
for node in nodes:
matches = True
for f in filters.filters:
if f.key not in node.metadata:
matches = False
continue
if f.value != node.metadata[f.key]:
matches = False
continue
if matches:
filtered_nodes.append(node)
return filtered_nodes
def dense_search(query: VectorStoreQuery, nodes: List[BaseNode]):
"""Dense search."""
query_embedding = cast(List[float], query.query_embedding)
doc_embeddings = [n.embedding for n in nodes]
doc_ids = [n.node_id for n in nodes]
return get_top_k_embeddings(
query_embedding,
doc_embeddings,
doc_ids,
similarity_top_k=query.similarity_top_k,
)
class VectorStore3B(VectorStore2):
"""Implements Metadata Filtering."""
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Get nodes for response."""
nodes = self.node_dict.values()
if query.filters is not None:
nodes = filter_nodes(nodes, query.filters)
if len(nodes) == 0:
result_nodes = []
similarities = []
node_ids = []
else:
similarities, node_ids = dense_search(query, nodes)
result_nodes = [self.node_dict[node_id] for node_id in node_ids]
return VectorStoreQueryResult(
nodes=result_nodes, similarities=similarities, ids=node_ids
)
vector_store = VectorStore3B()
vector_store.add(nodes)
query_str = "Can you tell me about the key concepts for safety finetuning"
query_embedding = embed_model.get_query_embedding(query_str)
query_obj = VectorStoreQuery(
query_embedding=query_embedding, similarity_top_k=2
)
query_result = vector_store.query(query_obj)
for similarity, node in zip(query_result.similarities, query_result.nodes):
print(
"\n----------------\n"
f"[Node ID {node.node_id}] Similarity: {similarity}\n\n"
f"{node.get_content(metadata_mode='all')}"
"\n----------------\n\n"
)
filters = MetadataFilters.from_dict({"source": "24"})
query_obj = VectorStoreQuery(
query_embedding=query_embedding, similarity_top_k=2, filters=filters
)
query_result = vector_store.query(query_obj)
for similarity, node in zip(query_result.similarities, query_result.nodes):
print(
"\n----------------\n"
f"[Node ID {node.node_id}] Similarity: {similarity}\n\n"
f"{node.get_content(metadata_mode='all')}"
"\n----------------\n\n"
)
from llama_index.core import VectorStoreIndex
index = | VectorStoreIndex.from_vector_store(vector_store) | llama_index.core.VectorStoreIndex.from_vector_store |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
from llama_index.llms.openai import OpenAI
from llama_index.core.query_engine import CitationQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage,
)
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
if not os.path.exists("./citation"):
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
index = VectorStoreIndex.from_documents(
documents,
)
index.storage_context.persist(persist_dir="./citation")
else:
index = load_index_from_storage(
| StorageContext.from_defaults(persist_dir="./citation") | llama_index.core.StorageContext.from_defaults |
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
Settings.callback_manager = callback_manager
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.tools import QueryEngineTool
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["albums", "tracks", "artists"],
verbose=True,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query"
),
)
from llama_index.core.query_pipeline import QueryPipeline as QP
qp = QP(verbose=True)
from llama_index.core.agent.react.types import (
ActionReasoningStep,
ObservationReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.agent import Task, AgentChatResponse
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
CustomAgentComponent,
QueryComponent,
ToolRunnerComponent,
)
from llama_index.core.llms import MessageRole
from typing import Dict, Any, Optional, Tuple, List, cast
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]:
"""Agent input function.
Returns:
A Dictionary of output keys and values. If you are specifying
src_key when defining links between this component and other
components, make sure the src_key matches the specified output_key.
"""
if "current_reasoning" not in state:
state["current_reasoning"] = []
reasoning_step = ObservationReasoningStep(observation=task.input)
state["current_reasoning"].append(reasoning_step)
return {"input": task.input}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core.agent import ReActChatFormatter
from llama_index.core.query_pipeline import InputComponent, Link
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool
def react_prompt_fn(
task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool]
) -> List[ChatMessage]:
chat_formatter = ReActChatFormatter()
return chat_formatter.format(
tools,
chat_history=task.memory.get() + state["memory"].get_all(),
current_reasoning=state["current_reasoning"],
)
react_prompt_component = AgentFnComponent(
fn=react_prompt_fn, partial_dict={"tools": [sql_tool]}
)
from typing import Set, Optional
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.llms import ChatResponse
from llama_index.core.agent.types import Task
def parse_react_output_fn(
task: Task, state: Dict[str, Any], chat_response: ChatResponse
):
"""Parse ReAct output into a reasoning step."""
output_parser = ReActOutputParser()
reasoning_step = output_parser.parse(chat_response.message.content)
return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step}
parse_react_output = AgentFnComponent(fn=parse_react_output_fn)
def run_tool_fn(
task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep
):
"""Run tool and process tool output."""
tool_runner_component = ToolRunnerComponent(
[sql_tool], callback_manager=task.callback_manager
)
tool_output = tool_runner_component.run_component(
tool_name=reasoning_step.action,
tool_input=reasoning_step.action_input,
)
observation_step = ObservationReasoningStep(observation=str(tool_output))
state["current_reasoning"].append(observation_step)
return {"response_str": observation_step.get_content(), "is_done": False}
run_tool = AgentFnComponent(fn=run_tool_fn)
def process_response_fn(
task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep
):
"""Process response."""
state["current_reasoning"].append(response_step)
response_str = response_step.response
state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER))
state["memory"].put(
ChatMessage(content=response_str, role=MessageRole.ASSISTANT)
)
return {"response_str": response_str, "is_done": True}
process_response = AgentFnComponent(fn=process_response_fn)
def process_agent_response_fn(
task: Task, state: Dict[str, Any], response_dict: dict
):
"""Process agent response."""
return (
AgentChatResponse(response_dict["response_str"]),
response_dict["is_done"],
)
process_agent_response = AgentFnComponent(fn=process_agent_response_fn)
from llama_index.core.query_pipeline import QueryPipeline as QP
from llama_index.llms.openai import OpenAI
qp.add_modules(
{
"agent_input": agent_input_component,
"react_prompt": react_prompt_component,
"llm": OpenAI(model="gpt-4-1106-preview"),
"react_output_parser": parse_react_output,
"run_tool": run_tool,
"process_response": process_response,
"process_agent_response": process_agent_response,
}
)
qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"])
qp.add_link(
"react_output_parser",
"run_tool",
condition_fn=lambda x: not x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link(
"react_output_parser",
"process_response",
condition_fn=lambda x: x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link("process_response", "process_agent_response")
qp.add_link("run_tool", "process_agent_response")
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.clean_dag)
net.show("agent_dag.html")
from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner
from llama_index.core.callbacks import CallbackManager
agent_worker = QueryPipelineAgentWorker(qp)
agent = AgentRunner(
agent_worker, callback_manager= | CallbackManager([]) | llama_index.core.callbacks.CallbackManager |
get_ipython().run_line_magic('pip', 'install llama-index-packs-node-parser-semantic-chunking')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-node-parser-semantic-chunking-base')
from llama_index.core import SimpleDirectoryReader
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'pg_essay.txt'")
documents = SimpleDirectoryReader(input_files=["pg_essay.txt"]).load_data()
from llama_index.packs.node_parser_semantic_chunking.base import SemanticChunker
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"SemanticChunkingQueryEnginePack",
"./semantic_chunking_pack",
skip_load=True,
)
from semantic_chunking_pack.base import SemanticChunker
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.openai import OpenAIEmbedding
embed_model = | OpenAIEmbedding() | llama_index.embeddings.openai.OpenAIEmbedding |
get_ipython().run_line_magic('pip', 'install llama-index-llms-replicate')
get_ipython().system('pip install llama-index')
from llama_index.llms.replicate import Replicate
from llama_index.core.llms.llama_utils import messages_to_prompt
llm_13b = Replicate(
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
context_window=4096,
messages_to_prompt=messages_to_prompt, # override message representation for llama 2
)
llm_70b = Replicate(
model="replicate/llama70b-v2-chat:e951f18578850b652510200860fc4ea62b3b16fac280f83ff32282f87bbd2e48",
context_window=4096,
messages_to_prompt=messages_to_prompt, # override message representation for llama 2
)
from llama_index.core.chat_engine import SimpleChatEngine
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.llms import ChatMessage
bot_70b = SimpleChatEngine(
llm=llm_70b,
memory=ChatMemoryBuffer.from_defaults(llm=llm_70b),
prefix_messages=[
ChatMessage(
role="system", content="You are a rapper with an ENTJ personality"
)
],
)
bot_13b = SimpleChatEngine(
llm=llm_13b,
memory= | ChatMemoryBuffer.from_defaults(llm=llm_13b) | llama_index.core.memory.ChatMemoryBuffer.from_defaults |
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf')
from llama_index.core import SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
from llama_index.core.evaluation import DatasetGenerator
documents = SimpleDirectoryReader(
input_files=["IPCC_AR6_WGII_Chapter03.pdf"]
).load_data()
import random
random.seed(42)
random.shuffle(documents)
gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
question_gen_query = (
"You are a Teacher/ Professor. Your task is to setup "
"a quiz/examination. Using the provided context from a "
"report on climate change and the oceans, formulate "
"a single question that captures an important fact from the "
"context. Restrict the question to the context information provided."
)
dataset_generator = DatasetGenerator.from_documents(
documents[:50],
question_gen_query=question_gen_query,
llm=gpt_35_llm,
)
questions = dataset_generator.generate_questions_from_nodes(num=40)
print("Generated ", len(questions), " questions")
with open("train_questions.txt", "w") as f:
for question in questions:
f.write(question + "\n")
dataset_generator = DatasetGenerator.from_documents(
documents[
50:
], # since we generated ~1 question for 40 documents, we can skip the first 40
question_gen_query=question_gen_query,
llm=gpt_35_llm,
)
questions = dataset_generator.generate_questions_from_nodes(num=40)
print("Generated ", len(questions), " questions")
with open("eval_questions.txt", "w") as f:
for question in questions:
f.write(question + "\n")
questions = []
with open("eval_questions.txt", "r") as f:
for line in f:
questions.append(line.strip())
from llama_index.core import VectorStoreIndex, Settings
Settings.context_window = 2048
gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(similarity_top_k=2, llm=gpt_35_llm)
contexts = []
answers = []
for question in questions:
response = query_engine.query(question)
contexts.append([x.node.get_content() for x in response.source_nodes])
answers.append(str(response))
from datasets import Dataset
from ragas import evaluate
from ragas.metrics import answer_relevancy, faithfulness
ds = Dataset.from_dict(
{
"question": questions,
"answer": answers,
"contexts": contexts,
}
)
result = evaluate(ds, [answer_relevancy, faithfulness])
print(result)
from llama_index.llms.openai import OpenAI
from llama_index.core.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
finetuning_handler = OpenAIFineTuningHandler()
callback_manager = CallbackManager([finetuning_handler])
llm = OpenAI(model="gpt-4", temperature=0.3)
Settings.callback_manager = (callback_manager,)
questions = []
with open("train_questions.txt", "r") as f:
for line in f:
questions.append(line.strip())
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(similarity_top_k=2, llm=llm)
for question in questions:
response = query_engine.query(question)
finetuning_handler.save_finetuning_events("finetuning_events.jsonl")
get_ipython().system('python ./launch_training.py ./finetuning_events.jsonl')
ft_model_name = "ft:gpt-3.5-turbo-0613:..."
from llama_index.llms.openai import OpenAI
ft_llm = OpenAI(model=ft_model_name, temperature=0.3)
questions = []
with open("eval_questions.txt", "r") as f:
for line in f:
questions.append(line.strip())
from llama_index import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(similarity_top_k=2, llm=ft_llm)
contexts = []
answers = []
for question in questions:
response = query_engine.query(question)
contexts.append([x.node.get_content() for x in response.source_nodes])
answers.append(str(response))
from datasets import Dataset
from ragas import evaluate
from ragas.metrics import answer_relevancy, faithfulness
ds = Dataset.from_dict(
{
"question": questions,
"answer": answers,
"contexts": contexts,
}
)
result = evaluate(ds, [answer_relevancy, faithfulness])
print(result)
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents)
questions = []
with open("eval_questions.txt", "r") as f:
for line in f:
questions.append(line.strip())
print(questions[12])
from llama_index.core.response.notebook_utils import display_response
from llama_index.llms.openai import OpenAI
gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
query_engine = index.as_query_engine(llm=gpt_35_llm)
response = query_engine.query(questions[12])
display_response(response)
from llama_index.llms.openai import OpenAI
ft_llm = OpenAI(model=ft_model_name, temperature=0.3)
query_engine = index.as_query_engine(llm=ft_llm)
response = query_engine.query(questions[12])
| display_response(response) | llama_index.core.response.notebook_utils.display_response |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import openai
import os
os.environ["OPENAI_API_KEY"] = "API_KEY_HERE"
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
data = SimpleDirectoryReader(input_dir="./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(data)
from llama_index.core.memory import ChatMemoryBuffer
memory = ChatMemoryBuffer.from_defaults(token_limit=1500)
chat_engine = index.as_chat_engine(
chat_mode="context",
memory=memory,
system_prompt=(
"You are a chatbot, able to have normal interactions, as well as talk"
" about an essay discussing Paul Grahams life."
),
)
response = chat_engine.chat("Hello!")
print(response)
response = chat_engine.chat("What did Paul Graham do growing up?")
print(response)
response = chat_engine.chat("Can you tell me more?")
print(response)
chat_engine.reset()
response = chat_engine.chat("Hello! What do you know?")
print(response)
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo", temperature=0)
data = | SimpleDirectoryReader(input_dir="./data/paul_graham/") | llama_index.core.SimpleDirectoryReader |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from IPython.display import Markdown, display
import chromadb
import os
import getpass
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection("quickstart")
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, embed_model=embed_model
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
db = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db.get_or_create_collection("quickstart")
vector_store = | ChromaVectorStore(chroma_collection=chroma_collection) | llama_index.vector_stores.chroma.ChromaVectorStore |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=')
get_ipython().run_line_magic('env', 'BRAINTRUST_API_KEY=')
get_ipython().run_line_magic('env', 'TOKENIZERS_PARALLELISM=true # This is needed to avoid a warning message from Chroma')
get_ipython().run_line_magic('pip', 'install -U llama_hub llama_index braintrust autoevals pypdf pillow transformers torch torchvision')
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-small-en")
llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
base_retriever = base_index.as_retriever(similarity_top_k=2)
retrievals = base_retriever.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for n in retrievals:
display_source_node(n, source_length=1500)
query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm)
response = query_engine_base.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
sub_chunk_sizes = [128, 256, 512]
sub_node_parsers = [SentenceSplitter(chunk_size=c) for c in sub_chunk_sizes]
all_nodes = []
for base_node in base_nodes:
for n in sub_node_parsers:
sub_nodes = n.get_nodes_from_documents([base_node])
sub_inodes = [
IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes
]
all_nodes.extend(sub_inodes)
original_node = IndexNode.from_text_node(base_node, base_node.node_id)
all_nodes.append(original_node)
all_nodes_dict = {n.node_id: n for n in all_nodes}
vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model)
vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2)
retriever_chunk = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_chunk},
node_dict=all_nodes_dict,
verbose=True,
)
nodes = retriever_chunk.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for node in nodes:
display_source_node(node, source_length=2000)
query_engine_chunk = | RetrieverQueryEngine.from_args(retriever_chunk, llm=llm) | llama_index.core.query_engine.RetrieverQueryEngine.from_args |
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf')
from llama_index.core import SimpleDirectoryReader
from llama_index.llms.openai import OpenAI
from llama_index.core.evaluation import DatasetGenerator
documents = SimpleDirectoryReader(
input_files=["IPCC_AR6_WGII_Chapter03.pdf"]
).load_data()
import random
random.seed(42)
random.shuffle(documents)
gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
question_gen_query = (
"You are a Teacher/ Professor. Your task is to setup "
"a quiz/examination. Using the provided context from a "
"report on climate change and the oceans, formulate "
"a single question that captures an important fact from the "
"context. Restrict the question to the context information provided."
)
dataset_generator = DatasetGenerator.from_documents(
documents[:50],
question_gen_query=question_gen_query,
llm=gpt_35_llm,
)
questions = dataset_generator.generate_questions_from_nodes(num=40)
print("Generated ", len(questions), " questions")
with open("train_questions.txt", "w") as f:
for question in questions:
f.write(question + "\n")
dataset_generator = DatasetGenerator.from_documents(
documents[
50:
], # since we generated ~1 question for 40 documents, we can skip the first 40
question_gen_query=question_gen_query,
llm=gpt_35_llm,
)
questions = dataset_generator.generate_questions_from_nodes(num=40)
print("Generated ", len(questions), " questions")
with open("eval_questions.txt", "w") as f:
for question in questions:
f.write(question + "\n")
questions = []
with open("eval_questions.txt", "r") as f:
for line in f:
questions.append(line.strip())
from llama_index.core import VectorStoreIndex, Settings
Settings.context_window = 2048
gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(similarity_top_k=2, llm=gpt_35_llm)
contexts = []
answers = []
for question in questions:
response = query_engine.query(question)
contexts.append([x.node.get_content() for x in response.source_nodes])
answers.append(str(response))
from datasets import Dataset
from ragas import evaluate
from ragas.metrics import answer_relevancy, faithfulness
ds = Dataset.from_dict(
{
"question": questions,
"answer": answers,
"contexts": contexts,
}
)
result = evaluate(ds, [answer_relevancy, faithfulness])
print(result)
from llama_index.llms.openai import OpenAI
from llama_index.core.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
finetuning_handler = OpenAIFineTuningHandler()
callback_manager = CallbackManager([finetuning_handler])
llm = OpenAI(model="gpt-4", temperature=0.3)
Settings.callback_manager = (callback_manager,)
questions = []
with open("train_questions.txt", "r") as f:
for line in f:
questions.append(line.strip())
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(similarity_top_k=2, llm=llm)
for question in questions:
response = query_engine.query(question)
finetuning_handler.save_finetuning_events("finetuning_events.jsonl")
get_ipython().system('python ./launch_training.py ./finetuning_events.jsonl')
ft_model_name = "ft:gpt-3.5-turbo-0613:..."
from llama_index.llms.openai import OpenAI
ft_llm = OpenAI(model=ft_model_name, temperature=0.3)
questions = []
with open("eval_questions.txt", "r") as f:
for line in f:
questions.append(line.strip())
from llama_index import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(similarity_top_k=2, llm=ft_llm)
contexts = []
answers = []
for question in questions:
response = query_engine.query(question)
contexts.append([x.node.get_content() for x in response.source_nodes])
answers.append(str(response))
from datasets import Dataset
from ragas import evaluate
from ragas.metrics import answer_relevancy, faithfulness
ds = Dataset.from_dict(
{
"question": questions,
"answer": answers,
"contexts": contexts,
}
)
result = evaluate(ds, [answer_relevancy, faithfulness])
print(result)
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents)
questions = []
with open("eval_questions.txt", "r") as f:
for line in f:
questions.append(line.strip())
print(questions[12])
from llama_index.core.response.notebook_utils import display_response
from llama_index.llms.openai import OpenAI
gpt_35_llm = | OpenAI(model="gpt-3.5-turbo", temperature=0.3) | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Response
from llama_index.llms.openai import OpenAI
from llama_index.core.evaluation import PairwiseComparisonEvaluator
from llama_index.core.node_parser import SentenceSplitter
import pandas as pd
pd.set_option("display.max_colwidth", 0)
gpt4 = OpenAI(temperature=0, model="gpt-4")
evaluator_gpt4 = PairwiseComparisonEvaluator(llm=gpt4)
documents = SimpleDirectoryReader("./test_wiki_data/").load_data()
splitter_512 = | SentenceSplitter(chunk_size=512) | llama_index.core.node_parser.SentenceSplitter |
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import QueryEngineTool, ToolMetadata
llm_35 = OpenAI(model="gpt-3.5-turbo-0613", temperature=0.3)
llm_4 = OpenAI(model="gpt-4-0613", temperature=0.3)
try:
storage_context = StorageContext.from_defaults(
persist_dir="./storage/march"
)
march_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/june"
)
june_index = | load_index_from_storage(storage_context) | llama_index.core.load_index_from_storage |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.2)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
documents = | SimpleDirectoryReader("../data/paul_graham") | llama_index.core.SimpleDirectoryReader |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.readers.file import PDFReader
reader = PDFReader()
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'")
docs = reader.load_data("./data/10k/lyft_2021.pdf")
from llama_index.core.node_parser import SentenceSplitter
node_parser = SentenceSplitter()
nodes = node_parser.get_nodes_from_documents(docs)
print(nodes[8].get_content(metadata_mode="all"))
get_ipython().system('pip install psycopg2-binary pgvector asyncpg "sqlalchemy[asyncio]" greenlet')
from pgvector.sqlalchemy import Vector
from sqlalchemy import insert, create_engine, String, text, Integer
from sqlalchemy.orm import declarative_base, mapped_column
engine = create_engine("postgresql+psycopg2://localhost/postgres")
with engine.connect() as conn:
conn.execute(text("CREATE EXTENSION IF NOT EXISTS vector"))
conn.commit()
Base = declarative_base()
class SECTextChunk(Base):
__tablename__ = "sec_text_chunk"
id = mapped_column(Integer, primary_key=True)
page_label = mapped_column(Integer)
file_name = mapped_column(String)
text = mapped_column(String)
embedding = mapped_column(Vector(384))
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
embed_model = | HuggingFaceEmbedding(model_name="BAAI/bge-small-en") | llama_index.embeddings.huggingface.HuggingFaceEmbedding |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core.llama_dataset import download_llama_dataset
rag_dataset, documents = download_llama_dataset(
"PaulGrahamEssayDataset", "./paul_graham"
)
rag_dataset.to_pandas()[:5]
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
import nest_asyncio
nest_asyncio.apply()
prediction_dataset = await rag_dataset.amake_predictions_with(
query_engine=query_engine, show_progress=True
)
prediction_dataset.to_pandas()[:5]
import tqdm
from llama_index.llms.openai import OpenAI
from llama_index.core.evaluation import (
CorrectnessEvaluator,
FaithfulnessEvaluator,
RelevancyEvaluator,
SemanticSimilarityEvaluator,
)
judges = {}
judges["correctness"] = CorrectnessEvaluator(
llm=OpenAI(temperature=0, model="gpt-4"),
)
judges["relevancy"] = RelevancyEvaluator(
llm=OpenAI(temperature=0, model="gpt-4"),
)
judges["faithfulness"] = FaithfulnessEvaluator(
llm=OpenAI(temperature=0, model="gpt-4"),
)
judges["semantic_similarity"] = SemanticSimilarityEvaluator()
evals = {
"correctness": [],
"relevancy": [],
"faithfulness": [],
"context_similarity": [],
}
for example, prediction in tqdm.tqdm(
zip(rag_dataset.examples, prediction_dataset.predictions)
):
correctness_result = judges["correctness"].evaluate(
query=example.query,
response=prediction.response,
reference=example.reference_answer,
)
relevancy_result = judges["relevancy"].evaluate(
query=example.query,
response=prediction.response,
contexts=prediction.contexts,
)
faithfulness_result = judges["faithfulness"].evaluate(
query=example.query,
response=prediction.response,
contexts=prediction.contexts,
)
semantic_similarity_result = judges["semantic_similarity"].evaluate(
query=example.query,
response="\n".join(prediction.contexts),
reference="\n".join(example.reference_contexts),
)
evals["correctness"].append(correctness_result)
evals["relevancy"].append(relevancy_result)
evals["faithfulness"].append(faithfulness_result)
evals["context_similarity"].append(semantic_similarity_result)
import json
evaluations_objects = {
"context_similarity": [e.dict() for e in evals["context_similarity"]],
"correctness": [e.dict() for e in evals["correctness"]],
"faithfulness": [e.dict() for e in evals["faithfulness"]],
"relevancy": [e.dict() for e in evals["relevancy"]],
}
with open("evaluations.json", "w") as json_file:
json.dump(evaluations_objects, json_file)
import pandas as pd
from llama_index.core.evaluation.notebook_utils import get_eval_results_df
deep_eval_df, mean_correctness_df = get_eval_results_df(
["base_rag"] * len(evals["correctness"]),
evals["correctness"],
metric="correctness",
)
deep_eval_df, mean_relevancy_df = get_eval_results_df(
["base_rag"] * len(evals["relevancy"]),
evals["relevancy"],
metric="relevancy",
)
_, mean_faithfulness_df = get_eval_results_df(
["base_rag"] * len(evals["faithfulness"]),
evals["faithfulness"],
metric="faithfulness",
)
_, mean_context_similarity_df = get_eval_results_df(
["base_rag"] * len(evals["context_similarity"]),
evals["context_similarity"],
metric="context_similarity",
)
mean_scores_df = pd.concat(
[
mean_correctness_df.reset_index(),
mean_relevancy_df.reset_index(),
mean_faithfulness_df.reset_index(),
mean_context_similarity_df.reset_index(),
],
axis=0,
ignore_index=True,
)
mean_scores_df = mean_scores_df.set_index("index")
mean_scores_df.index = mean_scores_df.index.set_names(["metrics"])
mean_scores_df
from llama_index.core.llama_pack import download_llama_pack
RagEvaluatorPack = | download_llama_pack("RagEvaluatorPack", "./pack") | llama_index.core.llama_pack.download_llama_pack |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4")
node_parser = SentenceSplitter(chunk_size=1024)
nodes = node_parser.get_nodes_from_documents(documents)
index = VectorStoreIndex(nodes)
query_engine = index.as_query_engine(llm=llm)
from llama_index.core.schema import BaseNode
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core import ChatPromptTemplate, PromptTemplate
from typing import Tuple, List
import re
llm = OpenAI(model="gpt-4")
QA_PROMPT = PromptTemplate(
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query.\n"
"Query: {query_str}\n"
"Answer: "
)
def generate_answers_for_questions(
questions: List[str], context: str, llm: OpenAI
) -> str:
"""Generate answers for questions given context."""
answers = []
for question in questions:
fmt_qa_prompt = QA_PROMPT.format(
context_str=context, query_str=question
)
response_obj = llm.complete(fmt_qa_prompt)
answers.append(str(response_obj))
return answers
QUESTION_GEN_USER_TMPL = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"generate the relevant questions. "
)
QUESTION_GEN_SYS_TMPL = """\
You are a Teacher/ Professor. Your task is to setup \
{num_questions_per_chunk} questions for an upcoming \
quiz/examination. The questions should be diverse in nature \
across the document. Restrict the questions to the \
context information provided.\
"""
question_gen_template = ChatPromptTemplate(
message_templates=[
ChatMessage(role=MessageRole.SYSTEM, content=QUESTION_GEN_SYS_TMPL),
| ChatMessage(role=MessageRole.USER, content=QUESTION_GEN_USER_TMPL) | llama_index.core.llms.ChatMessage |
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-docarray')
get_ipython().system('pip install llama-index')
import os
import sys
import logging
import textwrap
import warnings
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from llama_index.core import (
GPTVectorStoreIndex,
SimpleDirectoryReader,
Document,
)
from llama_index.vector_stores.docarray import DocArrayHnswVectorStore
from IPython.display import Markdown, display
import os
os.environ["OPENAI_API_KEY"] = "<your openai key>"
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(
"Document ID:",
documents[0].doc_id,
"Document Hash:",
documents[0].doc_hash,
)
from llama_index.core import StorageContext
vector_store = DocArrayHnswVectorStore(work_dir="hnsw_index")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = GPTVectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What was a hard moment for the author?")
print(textwrap.fill(str(response), 100))
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
},
),
]
from llama_index.core import StorageContext
vector_store = DocArrayHnswVectorStore(work_dir="hnsw_filters")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = | GPTVectorStoreIndex(nodes, storage_context=storage_context) | llama_index.core.GPTVectorStoreIndex |
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().system('pip install llama-index')
get_ipython().run_cell_magic('bash', '', 'wget -e robots=off --no-clobber --page-requisites \\\n --html-extension --convert-links --restrict-file-names=windows \\\n --domains docs.ray.io --no-parent --accept=html \\\n -P data/ https://docs.ray.io/en/master/ray-overview/installation.html\n')
from llama_index.readers.file import HTMLTagReader
reader = | HTMLTagReader(tag="section", ignore_no_id=True) | llama_index.readers.file.HTMLTagReader |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-program-evaporate')
get_ipython().system('pip install llama-index')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston"]
from pathlib import Path
import requests
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
data_path = Path("data")
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
from llama_index.core import SimpleDirectoryReader
city_docs = {}
for wiki_title in wiki_titles:
city_docs[wiki_title] = SimpleDirectoryReader(
input_files=[f"data/{wiki_title}.txt"]
).load_data()
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.chunk_size = 512
city_nodes = {}
for wiki_title in wiki_titles:
docs = city_docs[wiki_title]
nodes = Settings.node_parser.get_nodes_from_documents(docs)
city_nodes[wiki_title] = nodes
from llama_index.program.evaporate import DFEvaporateProgram
program = DFEvaporateProgram.from_defaults(
fields_to_extract=["population"],
)
program.fit_fields(city_nodes["Toronto"][:1])
print(program.get_function_str("population"))
seattle_df = program(nodes=city_nodes["Seattle"][:1])
seattle_df
Settings.llm = | OpenAI(temperature=0, model="gpt-4") | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import json
from typing import Sequence, List
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
import nest_asyncio
nest_asyncio.apply()
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
class YourOpenAIAgent:
def __init__(
self,
tools: Sequence[BaseTool] = [],
llm: OpenAI = OpenAI(temperature=0, model="gpt-3.5-turbo-0613"),
chat_history: List[ChatMessage] = [],
) -> None:
self._llm = llm
self._tools = {tool.metadata.name: tool for tool in tools}
self._chat_history = chat_history
def reset(self) -> None:
self._chat_history = []
def chat(self, message: str) -> str:
chat_history = self._chat_history
chat_history.append(ChatMessage(role="user", content=message))
tools = [
tool.metadata.to_openai_tool() for _, tool in self._tools.items()
]
ai_message = self._llm.chat(chat_history, tools=tools).message
additional_kwargs = ai_message.additional_kwargs
chat_history.append(ai_message)
tool_calls = ai_message.additional_kwargs.get("tool_calls", None)
if tool_calls is not None:
for tool_call in tool_calls:
function_message = self._call_function(tool_call)
chat_history.append(function_message)
ai_message = self._llm.chat(chat_history).message
chat_history.append(ai_message)
return ai_message.content
def _call_function(self, tool_call: dict) -> ChatMessage:
id_ = tool_call["id"]
function_call = tool_call["function"]
tool = self._tools[function_call["name"]]
output = tool(**json.loads(function_call["arguments"]))
return ChatMessage(
name=function_call["name"],
content=str(output),
role="tool",
additional_kwargs={
"tool_call_id": id_,
"name": function_call["name"],
},
)
agent = YourOpenAIAgent(tools=[multiply_tool, add_tool])
agent.chat("Hi")
agent.chat("What is 2123 * 215123")
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-0613")
agent = OpenAIAgent.from_tools(
[multiply_tool, add_tool], llm=llm, verbose=True
)
response = agent.chat("What is (121 * 3) + 42?")
print(str(response))
print(response.sources)
response = await agent.achat("What is 121 * 3?")
print(str(response))
response = agent.stream_chat(
"What is 121 * 2? Once you have the answer, use that number to write a"
" story about a group of mice."
)
response_gen = response.response_gen
for token in response_gen:
print(token, end="")
response = await agent.astream_chat(
"What is 121 + 8? Once you have the answer, use that number to write a"
" story about a group of mice."
)
response_gen = response.response_gen
async for token in response.async_response_gen():
print(token, end="")
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.prompts.system import SHAKESPEARE_WRITING_ASSISTANT
llm = | OpenAI(model="gpt-3.5-turbo-0613") | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import json
from typing import Sequence, List
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
import nest_asyncio
nest_asyncio.apply()
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
tools = [multiply_tool, add_tool]
llm = | OpenAI(model="gpt-3.5-turbo") | llama_index.llms.openai.OpenAI |
from llama_hub.semanticscholar.base import SemanticScholarReader
import os
import openai
from llama_index.llms import OpenAI
from llama_index.query_engine import CitationQueryEngine
from llama_index import (
VectorStoreIndex,
StorageContext,
load_index_from_storage,
ServiceContext,
)
from llama_index.response.notebook_utils import display_response
s2reader = SemanticScholarReader()
openai.api_key = os.environ["OPENAI_API_KEY"]
service_context = ServiceContext.from_defaults(
llm=OpenAI(model="gpt-3.5-turbo", temperature=0)
)
query_space = "large language models"
full_text = True
total_papers = 50
persist_dir = (
"./citation_" + query_space + "_" + str(total_papers) + "_" + str(full_text)
)
if not os.path.exists(persist_dir):
documents = s2reader.load_data(query_space, total_papers, full_text=full_text)
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
index.storage_context.persist(persist_dir=persist_dir)
else:
index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=persist_dir),
service_context=service_context,
)
query_engine = CitationQueryEngine.from_args(
index,
similarity_top_k=3,
citation_chunk_size=512,
)
query_string = "limitations of using large language models"
response = query_engine.query(query_string)
display_response(
response, show_source=True, source_length=100, show_source_metadata=True
)
query_space = "covid 19 vaccine"
query_string = "List the efficacy numbers of the covid 19 vaccines"
full_text = True
total_papers = 50
persist_dir = (
"./citation_" + query_space + "_" + str(total_papers) + "_" + str(full_text)
)
if not os.path.exists(persist_dir):
documents = s2reader.load_data(query_space, total_papers, full_text=full_text)
index = | VectorStoreIndex.from_documents(documents, service_context=service_context) | llama_index.VectorStoreIndex.from_documents |
get_ipython().run_line_magic('pip', 'install llama-index-llms-portkey')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install -U llama_index')
get_ipython().system('pip install -U portkey-ai')
from llama_index.llms.portkey import Portkey
from llama_index.core.llms import ChatMessage
import portkey as pk
import os
os.environ["PORTKEY_API_KEY"] = "PORTKEY_API_KEY"
openai_virtual_key_a = ""
openai_virtual_key_b = ""
anthropic_virtual_key_a = ""
anthropic_virtual_key_b = ""
cohere_virtual_key_a = ""
cohere_virtual_key_b = ""
os.environ["OPENAI_API_KEY"] = ""
os.environ["ANTHROPIC_API_KEY"] = ""
portkey_client = Portkey(
mode="single",
)
openai_llm = pk.LLMOptions(
provider="openai",
model="gpt-4",
virtual_key=openai_virtual_key_a,
)
portkey_client.add_llms(openai_llm)
messages = [
ChatMessage(role="system", content="You are a helpful assistant"),
ChatMessage(role="user", content="What can you do?"),
]
print("Testing Portkey Llamaindex integration:")
response = portkey_client.chat(messages)
print(response)
prompt = "Why is the sky blue?"
print("\nTesting Stream Complete:\n")
response = portkey_client.stream_complete(prompt)
for i in response:
print(i.delta, end="", flush=True)
messages = [
ChatMessage(role="system", content="You are a helpful assistant"),
ChatMessage(role="user", content="What can you do?"),
]
print("\nTesting Stream Chat:\n")
response = portkey_client.stream_chat(messages)
for i in response:
print(i.delta, end="", flush=True)
portkey_client = Portkey(mode="fallback")
messages = [
ChatMessage(role="system", content="You are a helpful assistant"),
ChatMessage(role="user", content="What can you do?"),
]
llm1 = pk.LLMOptions(
provider="openai",
model="gpt-4",
retry_settings={"on_status_codes": [429, 500], "attempts": 2},
virtual_key=openai_virtual_key_a,
)
llm2 = pk.LLMOptions(
provider="openai",
model="gpt-3.5-turbo",
virtual_key=openai_virtual_key_b,
)
portkey_client.add_llms(llm_params=[llm1, llm2])
print("Testing Fallback & Retry functionality:")
response = portkey_client.chat(messages)
print(response)
portkey_client = Portkey(mode="ab_test")
messages = [
ChatMessage(role="system", content="You are a helpful assistant"),
ChatMessage(role="user", content="What can you do?"),
]
llm1 = pk.LLMOptions(
provider="openai",
model="gpt-4",
virtual_key=openai_virtual_key_a,
weight=0.2,
)
llm2 = pk.LLMOptions(
provider="openai",
model="gpt-3.5-turbo",
virtual_key=openai_virtual_key_a,
weight=0.8,
)
portkey_client.add_llms(llm_params=[llm1, llm2])
print("Testing Loadbalance functionality:")
response = portkey_client.chat(messages)
print(response)
import time
portkey_client = Portkey(mode="single")
openai_llm = pk.LLMOptions(
provider="openai",
model="gpt-3.5-turbo",
virtual_key=openai_virtual_key_a,
cache_status="semantic",
)
portkey_client.add_llms(openai_llm)
current_messages = [
| ChatMessage(role="system", content="You are a helpful assistant") | llama_index.core.llms.ChatMessage |