get_ipython().run_line_magic('pip', 'install llama-index-llms-friendli')
get_ipython().system('pip install llama-index')
get_ipython().run_line_magic('env', 'FRIENDLI_TOKEN=...')
from llama_index.llms.friendli import Friendli
llm = Friendli()
from llama_index.core.llms import ChatMessage, MessageRole
message = ChatMessage(role=MessageRole.USER, content="Tell me a joke.")
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-graph-stores-neo4j')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-azure-openai')
import os
os.environ["OPENAI_API_KEY"] = "API_KEY_HERE"
import logging
import sys
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.llm = llm
Settings.chunk_size = 512
import os
import json
import openai
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
KnowledgeGraphIndex,
)
import logging
import sys
from IPython.display import Markdown, display
logging.basicConfig(
stream=sys.stdout, level=logging.INFO
) # logging.DEBUG for more verbose output
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
openai.api_type = "azure"
openai.api_base = "https://<foo-bar>.openai.azure.com"
openai.api_version = "2022-12-01"
os.environ["OPENAI_API_KEY"] = "<your-openai-key>"
openai.api_key = os.getenv("OPENAI_API_KEY")
llm = AzureOpenAI(
deployment_name="<foo-bar-deployment>",
temperature=0,
openai_api_version=openai.api_version,
model_kwargs={
"api_key": openai.api_key,
"api_base": openai.api_base,
"api_type": openai.api_type,
"api_version": openai.api_version,
},
)
embedding_llm = OpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name="<foo-bar-deployment>",
api_key=openai.api_key,
api_base=openai.api_base,
api_type=openai.api_type,
api_version=openai.api_version,
)
Settings.llm = llm
Settings.embed_model = embedding_llm
Settings.chunk_size = 512
from llama_index.core import KnowledgeGraphIndex, SimpleDirectoryReader
from llama_index.core import StorageContext
from llama_index.graph_stores.neo4j import Neo4jGraphStore
from llama_index.llms.openai import OpenAI
from IPython.display import Markdown, display
documents = SimpleDirectoryReader(
"../../../../examples/paul_graham_essay/data"
).load_data()
get_ipython().run_line_magic('pip', 'install neo4j')
username = "neo4j"
password = "retractor-knot-thermocouples"
url = "bolt://44.211.44.239:7687"
database = "neo4j"
graph_store = Neo4jGraphStore(
username=username,
password=password,
url=url,
database=database,
)
storage_context = StorageContext.from_defaults(graph_store=graph_store)
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-supabase')
get_ipython().system('pip install llama-index')
import logging
import sys
from llama_index.core import SimpleDirectoryReader, Document, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.supabase import SupabaseVectorStore
import textwrap
import os
os.environ["OPENAI_API_KEY"] = "[your_openai_api_key]"
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(
"Document ID:",
documents[0].doc_id,
"Document Hash:",
documents[0].hash,
)
vector_store = SupabaseVectorStore(
postgres_connection_string=(
"postgresql://<user>:<password>@<host>:<port>/<db_name>"
),
collection_name="base_demo",
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-txtai')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import txtai
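# Build a txtai ANN index with the NumPy backend to serve as the vector store.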
txtai_index = txtai.ann.ANNFactory.create({"backend": "numpy"})
from llama_index.core import (
SimpleDirectoryReader,
load_index_from_storage,
VectorStoreIndex,
StorageContext,
)
from llama_index.vector_stores.txtai import TxtaiVectorStore
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = TxtaiVectorStore(txtai_index=txtai_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
index.storage_context.persist()
vector_store = TxtaiVectorStore.from_persist_dir("./storage")
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
import logging
import sys
import os
import qdrant_client
from IPython.display import Markdown, display
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core import StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
os.environ["OPENAI_API_KEY"] = "YOUR OPENAI API KEY"
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
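# Run Qdrant fully in memory; point the client at a server URL to persist data instead.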
client = qdrant_client.QdrantClient(
location=":memory:"
)
vector_store = QdrantVectorStore(client=client, collection_name="paul_graham")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents,
storage_context=storage_context,
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
query_engine = index.as_query_engine()
response = query_engine.query(
"What did the author do after his time at Viaweb?"
)
display(Markdown(f"<b>{response}</b>"))
import nest_asyncio
nest_asyncio.apply()
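# Reconnect to a local Qdrant server over gRPC (port 6334) for the async examples.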
client = qdrant_client.QdrantClient(
url="http://localhost:6334",
prefer_grpc=True,
)
vector_store = QdrantVectorStore(
client=client, collection_name="paul_graham", prefer_grpc=True
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install llama-index chromadb --quiet')
get_ipython().system('pip install chromadb==0.4.17')
get_ipython().system('pip install sentence-transformers')
get_ipython().system('pip install pydantic==1.10.11')
get_ipython().system('pip install open-clip-torch')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from IPython.display import Markdown, display
import chromadb
import os
import openai
OPENAI_API_KEY = ""
openai.api_key = OPENAI_API_KEY
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
import requests
def get_wikipedia_images(title):
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "imageinfo",
"iiprop": "url|dimensions|mime",
"generator": "images",
"gimlimit": "50",
},
).json()
image_urls = []
for page in response["query"]["pages"].values():
if page["imageinfo"][0]["url"].endswith(".jpg") or page["imageinfo"][
0
]["url"].endswith(".png"):
image_urls.append(page["imageinfo"][0]["url"])
return image_urls
from pathlib import Path
import urllib.request
image_uuid = 0
MAX_IMAGES_PER_WIKI = 20
wiki_titles = {
"Tesla Model X",
"Pablo Picasso",
"Rivian",
"The Lord of the Rings",
"The Matrix",
"The Simpsons",
}
data_path = Path("mixed_wiki")
if not data_path.exists():
Path.mkdir(data_path)
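# For each Wikipedia title, save the page text and download up to MAX_IMAGES_PER_WIKI images.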
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
images_per_wiki = 0
try:
list_img_urls = get_wikipedia_images(title)
for url in list_img_urls:
if url.endswith(".jpg") or url.endswith(".png"):
image_uuid += 1
urllib.request.urlretrieve(
url, data_path / f"{image_uuid}.jpg"
)
images_per_wiki += 1
if images_per_wiki > MAX_IMAGES_PER_WIKI:
break
except Exception:
print(f"No images found for Wikipedia page: {title}")
continue
from chromadb.utils.embedding_functions import OpenCLIPEmbeddingFunction
embedding_function = OpenCLIPEmbeddingFunction()
from llama_index.core.indices import MultiModalVectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import SimpleDirectoryReader, StorageContext
from chromadb.utils.data_loaders import ImageLoader
image_loader = ImageLoader()
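# Create an ephemeral (in-process) Chroma collection that uses OpenCLIP to embed both text and images.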
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection(
"multimodal_collection",
embedding_function=embedding_function,
data_loader=image_loader,
)
documents = SimpleDirectoryReader("./mixed_wiki/").load_data()
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents,
storage_context=storage_context,
)
retriever = index.as_retriever(similarity_top_k=50)
retrieval_results = retriever.retrieve("Picasso famous paintings")
from llama_index.core.schema import ImageNode
from llama_index.core.response.notebook_utils import (
display_source_node,
display_image_uris,
)
image_results = []
MAX_RES = 5
cnt = 0
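# Split retrieval hits into image nodes and text nodes; display at most MAX_RES text sources.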
for r in retrieval_results:
if isinstance(r.node, ImageNode):
image_results.append(r.node.metadata["file_path"])
else:
if cnt < MAX_RES:
display_source_node(r)
cnt += 1
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai llama-index-tools-tavily-research llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-packs-corrective-rag')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
import os
os.environ["OPENAI_API_KEY"] = "YOUR OPENAI API KEY"
tavily_ai_api_key = "<tavily_ai_api_key>"
import nest_asyncio
nest_asyncio.apply()
get_ipython().system("mkdir -p 'data/'")
get_ipython().system("curl 'https://arxiv.org/pdf/2307.09288.pdf' -o 'data/llama2.pdf'")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("data").load_data()
from llama_index.packs.corrective_rag import CorrectiveRAGPack
corrective_rag_pack = CorrectiveRAGPack(documents, tavily_ai_apikey=tavily_ai_api_key)
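# Usage sketch (assuming the pack's standard run() entry point):
response = corrective_rag_pack.run("How was Llama2 pretrained?", similarity_top_k=2)
print(response)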
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -o ./chinook.zip')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
Settings.callback_manager = callback_manager
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.tools import QueryEngineTool
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["albums", "tracks", "artists"],
verbose=True,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query"
),
)
from llama_index.core.query_pipeline import QueryPipeline as QP
qp = QP(verbose=True)
from llama_index.core.agent.react.types import (
ActionReasoningStep,
ObservationReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.agent import Task, AgentChatResponse
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
CustomAgentComponent,
QueryComponent,
ToolRunnerComponent,
)
from llama_index.core.llms import MessageRole
from typing import Dict, Any, Optional, Tuple, List, cast
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]:
"""Agent input function.
Returns:
A Dictionary of output keys and values. If you are specifying
src_key when defining links between this component and other
components, make sure the src_key matches the specified output_key.
"""
if "current_reasoning" not in state:
state["current_reasoning"] = []
reasoning_step = ObservationReasoningStep(observation=task.input)
state["current_reasoning"].append(reasoning_step)
return {"input": task.input}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core.agent import ReActChatFormatter
from llama_index.core.query_pipeline import InputComponent, Link
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool
def react_prompt_fn(
task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool]
) -> List[ChatMessage]:
chat_formatter = ReActChatFormatter()
return chat_formatter.format(
tools,
chat_history=task.memory.get() + state["memory"].get_all(),
current_reasoning=state["current_reasoning"],
)
react_prompt_component = AgentFnComponent(
fn=react_prompt_fn, partial_dict={"tools": [sql_tool]}
)
from typing import Set, Optional
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.llms import ChatResponse
from llama_index.core.agent.types import Task
def parse_react_output_fn(
task: Task, state: Dict[str, Any], chat_response: ChatResponse
):
"""Parse ReAct output into a reasoning step."""
output_parser = ReActOutputParser()
reasoning_step = output_parser.parse(chat_response.message.content)
return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step}
parse_react_output = AgentFnComponent(fn=parse_react_output_fn)
def run_tool_fn(
task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep
):
"""Run tool and process tool output."""
tool_runner_component = ToolRunnerComponent(
[sql_tool], callback_manager=task.callback_manager
)
tool_output = tool_runner_component.run_component(
tool_name=reasoning_step.action,
tool_input=reasoning_step.action_input,
)
observation_step = ObservationReasoningStep(observation=str(tool_output))
state["current_reasoning"].append(observation_step)
return {"response_str": observation_step.get_content(), "is_done": False}
run_tool = AgentFnComponent(fn=run_tool_fn)
def process_response_fn(
task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep
):
"""Process response."""
state["current_reasoning"].append(response_step)
response_str = response_step.response
state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER))
state["memory"].put(
ChatMessage(content=response_str, role=MessageRole.ASSISTANT)
)
return {"response_str": response_str, "is_done": True}
process_response = AgentFnComponent(fn=process_response_fn)
def process_agent_response_fn(
task: Task, state: Dict[str, Any], response_dict: dict
):
"""Process agent response."""
return (
AgentChatResponse(response_dict["response_str"]),
response_dict["is_done"],
)
process_agent_response = AgentFnComponent(fn=process_agent_response_fn)
from llama_index.core.query_pipeline import QueryPipeline as QP
from llama_index.llms.openai import OpenAI
qp.add_modules(
{
"agent_input": agent_input_component,
"react_prompt": react_prompt_component,
"llm": OpenAI(model="gpt-4-1106-preview"),
"react_output_parser": parse_react_output,
"run_tool": run_tool,
"process_response": process_response,
"process_agent_response": process_agent_response,
}
)
qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"])
qp.add_link(
"react_output_parser",
"run_tool",
condition_fn=lambda x: not x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link(
"react_output_parser",
"process_response",
condition_fn=lambda x: x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link("process_response", "process_agent_response")
qp.add_link("run_tool", "process_agent_response")
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.clean_dag)
net.show("agent_dag.html")
from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner
from llama_index.core.callbacks import CallbackManager
agent_worker = QueryPipelineAgentWorker(qp)
agent = AgentRunner(
agent_worker, callback_manager=CallbackManager([]), verbose=True
)
task = agent.create_task(
"What are some tracks from the artist AC/DC? Limit it to 3"
)
step_output = agent.run_step(task.task_id)
step_output = agent.run_step(task.task_id)
step_output.is_last
response = agent.finalize_response(task.task_id)
print(str(response))
agent.reset()
response = agent.chat(
"What are some tracks from the artist AC/DC? Limit it to 3"
)
print(str(response))
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-1106-preview")
from llama_index.core.agent import Task, AgentChatResponse
from typing import Dict, Any
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
)
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict:
"""Agent input function."""
if "convo_history" not in state:
state["convo_history"] = []
state["count"] = 0
state["convo_history"].append(f"User: {task.input}")
convo_history_str = "\n".join(state["convo_history"]) or "None"
return {"input": task.input, "convo_history": convo_history_str}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core import PromptTemplate
retry_prompt_str = """\
You are trying to generate a proper natural language query given a user input.
This query will then be interpreted by a downstream text-to-SQL agent which
will convert the query to a SQL statement. If the agent triggers an error,
then that will be reflected in the current conversation history (see below).
If the conversation history is None, use the user input. If it's not None,
generate a new SQL query that avoids the problems of the previous SQL query.
Input: {input}
Convo history (failed attempts):
{convo_history}
New input: """
retry_prompt = PromptTemplate(retry_prompt_str)
from llama_index.core import Response
from typing import Tuple
validate_prompt_str = """\
Given the user query, validate whether the inferred SQL query and response from executing the query is correct and answers the query.
Answer with YES or NO.
Query: {input}
Inferred SQL query: {sql_query}
SQL Response: {sql_response}
Result: """
validate_prompt = PromptTemplate(validate_prompt_str)
MAX_ITER = 3
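# Validate the inferred SQL answer with the LLM and stop after MAX_ITER failed attempts.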
def agent_output_fn(
task: Task, state: Dict[str, Any], output: Response
) -> Tuple[AgentChatResponse, bool]:
"""Agent output component."""
print(f"> Inferred SQL Query: {output.metadata['sql_query']}")
print(f"> SQL Response: {str(output)}")
state["convo_history"].append(
f"Assistant (inferred SQL query): {output.metadata['sql_query']}"
)
state["convo_history"].append(f"Assistant (response): {str(output)}")
validate_prompt_partial = validate_prompt.as_query_component(
partial={
"sql_query": output.metadata["sql_query"],
"sql_response": str(output),
}
)
qp = QP(chain=[validate_prompt_partial, llm])
validate_output = qp.run(input=task.input)
state["count"] += 1
is_done = False
if state["count"] >= MAX_ITER:
is_done = True
if "YES" in validate_output.message.content:
is_done = True
return AgentChatResponse(response=str(output)), is_done
agent_output_component = AgentFnComponent(fn=agent_output_fn)
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.response.pprint_utils import pprint_response
from llama_index.llms.openai import OpenAI
llm = OpenAI(temperature=0, model="gpt-4")
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-langchain')
get_ipython().run_line_magic('pip', 'install llama-index-llms-gradient')
get_ipython().system('pip install llama-index')
get_ipython().run_line_magic('pip', 'install llama-index --quiet')
get_ipython().run_line_magic('pip', 'install gradientai --quiet')
import os
os.environ["GRADIENT_ACCESS_TOKEN"] = "{GRADIENT_ACCESS_TOKEN}"
os.environ["GRADIENT_WORKSPACE_ID"] = "{GRADIENT_WORKSPACE_ID}"
from llama_index.llms.gradient import GradientBaseModelLLM
llm = GradientBaseModelLLM(
base_model_slug="llama2-7b-chat",
max_tokens=400,
)
result = llm.complete("Can you tell me about large language models?")
print(result)
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.embeddings.langchain import LangchainEmbedding
from langchain.embeddings import HuggingFaceEmbeddings
from llama_index.core.node_parser import SentenceSplitter
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
splitter = SentenceSplitter(chunk_size=1024)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-extractors-entity')
get_ipython().system('pip install llama-index')
import os
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
from llama_index.extractors.entity import EntityExtractor
from llama_index.core.node_parser import SentenceSplitter
entity_extractor = EntityExtractor(
prediction_threshold=0.5,
label_entities=False, # include the entity label in the metadata (can be erroneous)
device="cpu", # set to "cuda" if you have a GPU
)
node_parser = SentenceSplitter()
import sys
from llama_index import download_loader
BoardDocsReader = download_loader(
"BoardDocsReader",
loader_hub_url=(
"https://raw.githubusercontent.com/dweekly/llama-hub/boarddocs/llama_hub"
),
refresh_cache=True,
)
loader = BoardDocsReader(site="ca/redwood", committee_id="A4EP6J588C05")
from llama_index import GPTSimpleVectorIndex
documents = loader.load_data(meeting_ids=["CPSNV9612DF1"])
index = GPTSimpleVectorIndex.from_documents(documents)
get_ipython().run_line_magic('pip', 'install llama-index-llms-friendli')
get_ipython().system('pip install llama-index')
get_ipython().run_line_magic('env', 'FRIENDLI_TOKEN=...')
from llama_index.llms.friendli import Friendli
llm = Friendli()
from llama_index.core.llms import ChatMessage, MessageRole
message = ChatMessage(role=MessageRole.USER, content="Tell me a joke.")
resp = llm.chat([message])
print(resp)
resp = llm.stream_chat([message])
for r in resp:
print(r.delta, end="")
resp = await llm.achat([message])
print(resp)
resp = await llm.astream_chat([message])
async for r in resp:
print(r.delta, end="")
prompt = "Draft a cover letter for a role in software engineering."
resp = llm.complete(prompt)
print(resp)
resp = llm.stream_complete(prompt)
for r in resp:
print(r.delta, end="")
resp = await llm.acomplete(prompt)
print(resp)
resp = await llm.astream_complete(prompt)
async for r in resp:
print(r.delta, end="")
from llama_index.llms.friendli import Friendli
llm = Friendli(model="llama-2-70b-chat")
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
splitter = SentenceSplitter(chunk_size=256)
index = VectorStoreIndex.from_documents(
documents, transformations=[splitter], show_progress=True
)
from llama_index.retrievers.bm25 import BM25Retriever
vector_retriever = index.as_retriever(similarity_top_k=5)
bm25_retriever = BM25Retriever.from_defaults(
docstore=index.docstore, similarity_top_k=10
)
from llama_index.core.retrievers import QueryFusionRetriever
retriever = QueryFusionRetriever(
[vector_retriever, bm25_retriever],
retriever_weights=[0.6, 0.4],
similarity_top_k=10,
num_queries=1, # set this to 1 to disable query generation
mode="relative_score",
use_async=True,
verbose=True,
)
import nest_asyncio
nest_asyncio.apply()
nodes_with_scores = retriever.retrieve(
"What happened at Interleafe and Viaweb?"
)
for node in nodes_with_scores:
print(f"Score: {node.score:.2f} - {node.text[:100]}...\n-----")
from llama_index.core.retrievers import QueryFusionRetriever
retriever = QueryFusionRetriever(
[vector_retriever, bm25_retriever],
retriever_weights=[0.6, 0.4],
similarity_top_k=10,
num_queries=1, # set this to 1 to disable query generation
mode="dist_based_score",
use_async=True,
verbose=True,
)
nodes_with_scores = retriever.retrieve(
"What happened at Interleafe and Viaweb?"
)
for node in nodes_with_scores:
print(f"Score: {node.score:.2f} - {node.text[:100]}...\n-----")
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine.from_args(retriever)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=')
get_ipython().run_line_magic('env', 'BRAINTRUST_API_KEY=')
get_ipython().run_line_magic('env', 'TOKENIZERS_PARALLELISM=true # This is needed to avoid a warning message from Chroma')
get_ipython().run_line_magic('pip', 'install -U llama_hub llama_index braintrust autoevals pypdf pillow transformers torch torchvision')
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-small-en")
llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
base_retriever = base_index.as_retriever(similarity_top_k=2)
retrievals = base_retriever.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for n in retrievals:
display_source_node(n, source_length=1500)
query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm)
response = query_engine_base.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
sub_chunk_sizes = [128, 256, 512]
sub_node_parsers = [SentenceSplitter(chunk_size=c) for c in sub_chunk_sizes]
all_nodes = []
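# For every base chunk, index smaller sub-chunks that reference the parent node so retrieval can recurse back to it.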
for base_node in base_nodes:
for n in sub_node_parsers:
sub_nodes = n.get_nodes_from_documents([base_node])
sub_inodes = [
IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes
]
all_nodes.extend(sub_inodes)
original_node = IndexNode.from_text_node(base_node, base_node.node_id)
all_nodes.append(original_node)
all_nodes_dict = {n.node_id: n for n in all_nodes}
vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model)
vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2)
retriever_chunk = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_chunk},
node_dict=all_nodes_dict,
verbose=True,
)
nodes = retriever_chunk.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for node in nodes:
display_source_node(node, source_length=2000)
query_engine_chunk = RetrieverQueryEngine.from_args(retriever_chunk, llm=llm)
response = query_engine_chunk.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
from llama_index.core.extractors import (
SummaryExtractor,
QuestionsAnsweredExtractor,
)
extractors = [
SummaryExtractor(summaries=["self"], show_progress=True),
QuestionsAnsweredExtractor(questions=5, show_progress=True),
]
metadata_dicts = []
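# Run each metadata extractor over the base nodes (this makes LLM calls and may take a while).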
for extractor in extractors:
metadata_dicts.extend(extractor.extract(base_nodes))
def save_metadata_dicts(path):
with open(path, "w") as fp:
for m in metadata_dicts:
fp.write(json.dumps(m) + "\n")
def load_metadata_dicts(path):
with open(path, "r") as fp:
metadata_dicts = [json.loads(l) for l in fp.readlines()]
return metadata_dicts
save_metadata_dicts("data/llama2_metadata_dicts.jsonl")
metadata_dicts = load_metadata_dicts("data/llama2_metadata_dicts.jsonl")
import copy
all_nodes = copy.deepcopy(base_nodes)
for idx, d in enumerate(metadata_dicts):
inode_q = IndexNode(
text=d["questions_this_excerpt_can_answer"],
index_id=base_nodes[idx].node_id,
)
inode_s = IndexNode(
text=d["section_summary"], index_id=base_nodes[idx].node_id
)
all_nodes.extend([inode_q, inode_s])
all_nodes_dict = {n.node_id: n for n in all_nodes}
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo")
vector_index_metadata = VectorStoreIndex(all_nodes)
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-azurecosmosmongo')
get_ipython().run_line_magic('pip', 'install llama-index-llms-azure-openai')
get_ipython().system('pip install llama-index')
import os
import json
import openai
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
import os
llm = AzureOpenAI(
model_name=os.getenv("OPENAI_MODEL_COMPLETION"),
deployment_name=os.getenv("OPENAI_MODEL_COMPLETION"),
api_base=os.getenv("OPENAI_API_BASE"),
api_key=os.getenv("OPENAI_API_KEY"),
api_type=os.getenv("OPENAI_API_TYPE"),
api_version=os.getenv("OPENAI_API_VERSION"),
temperature=0,
)
embed_model = OpenAIEmbedding(
model=os.getenv("OPENAI_MODEL_EMBEDDING"),
deployment_name=os.getenv("OPENAI_DEPLOYMENT_EMBEDDING"),
api_base=os.getenv("OPENAI_API_BASE"),
api_key=os.getenv("OPENAI_API_KEY"),
api_type=os.getenv("OPENAI_API_TYPE"),
api_version=os.getenv("OPENAI_API_VERSION"),
)
from llama_index.core import Settings
Settings.llm = llm
Settings.embed_model = embed_model
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print("Document ID:", documents[0].doc_id)
import pymongo
from llama_index.vector_stores.azurecosmosmongo import (
AzureCosmosDBMongoDBVectorSearch,
)
from llama_index.core import VectorStoreIndex
from llama_index.core import StorageContext
from llama_index.core import SimpleDirectoryReader
connection_string = os.environ.get("AZURE_COSMOSDB_MONGODB_URI")
mongodb_client = pymongo.MongoClient(connection_string)
store = AzureCosmosDBMongoDBVectorSearch(
mongodb_client=mongodb_client,
db_name="demo_vectordb",
collection_name="paul_graham_essay",
)
storage_context = StorageContext.from_defaults(vector_store=store)
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
TABLE_NAME = os.environ["DYNAMODB_TABLE_NAME"]
from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore
from llama_index.storage.index_store.dynamodb import DynamoDBIndexStore
from llama_index.vector_stores.dynamodb import DynamoDBVectorStore
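# Back the docstore, index store, and vector store with the same DynamoDB table.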
storage_context = StorageContext.from_defaults(
docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME),
index_store=DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME),
vector_store=DynamoDBVectorStore.from_table_name(table_name=TABLE_NAME),
)
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
nodes, storage_context=storage_context
)
len(storage_context.docstore.docs)
storage_context.persist()
list_id = summary_index.index_id
vector_id = vector_index.index_id
keyword_id = keyword_table_index.index_id
from llama_index.core import load_index_from_storage
storage_context = StorageContext.from_defaults(
docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME),
index_store=DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME),
vector_store=DynamoDBVectorStore.from_table_name(table_name=TABLE_NAME),
)
summary_index = load_index_from_storage(
storage_context=storage_context, index_id=list_id
)
keyword_table_index = load_index_from_storage(
storage_context=storage_context, index_id=keyword_id
)
vector_index = load_index_from_storage(
storage_context=storage_context, index_id=vector_id
)
chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo")
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import TimeWeightedPostprocessor
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.core.response.notebook_utils import display_response
from datetime import datetime, timedelta
from llama_index.core import StorageContext
now = datetime.now()
key = "__last_accessed__"
doc1 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v1.txt"]
).load_data()[0]
doc2 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v2.txt"]
).load_data()[0]
doc3 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v3.txt"]
).load_data()[0]
from llama_index.core import Settings
Settings.text_splitter = SentenceSplitter(chunk_size=512)
nodes1 = Settings.text_splitter.get_nodes_from_documents([doc1])
nodes2 = Settings.text_splitter.get_nodes_from_documents([doc2])
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader("../data/paul_graham")
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface')
import nest_asyncio
nest_asyncio.apply()
import os
HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
get_ipython().system('pip install wikipedia -q')
from llama_index.readers.wikipedia import WikipediaReader
cities = [
"San Francisco",
"Toronto",
"New York",
"Vancouver",
"Montreal",
"Tokyo",
"Singapore",
"Paris",
]
documents = WikipediaReader().load_data(
pages=[f"History of {x}" for x in cities]
)
QUESTION_GEN_PROMPT = (
"You are a Teacher/ Professor. Your task is to setup "
"a quiz/examination. Using the provided context, formulate "
"a single question that captures an important fact from the "
"context. Restrict the question to the context information provided."
)
from llama_index.core.evaluation import DatasetGenerator
from llama_index.llms.openai import OpenAI
gpt_35_llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
dataset_generator = DatasetGenerator.from_documents(
documents,
question_gen_query=QUESTION_GEN_PROMPT,
llm=gpt_35_llm,
num_questions_per_chunk=25,
)
qrd = dataset_generator.generate_dataset_from_nodes(num=350)
from llama_index.core import VectorStoreIndex
from llama_index.core.retrievers import VectorIndexRetriever
the_index = VectorStoreIndex.from_documents(documents=documents)
the_retriever = VectorIndexRetriever(
index=the_index,
similarity_top_k=2,
)
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
llm = HuggingFaceInferenceAPI(
model_name="meta-llama/Llama-2-7b-chat-hf",
context_window=2048, # to use refine
token=HUGGING_FACE_TOKEN,
)
query_engine = RetrieverQueryEngine.from_args(retriever=the_retriever, llm=llm)
import tqdm
train_dataset = []
num_train_questions = int(0.65 * len(qrd.qr_pairs))
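# Build the training split: answer each question with the Llama-2 query engine and keep the reference answer alongside it.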
for q, a in tqdm.tqdm(qrd.qr_pairs[:num_train_questions]):
data_entry = {"question": q, "reference": a}
response = query_engine.query(q)
response_struct = {}
response_struct["model"] = "llama-2"
response_struct["text"] = str(response)
response_struct["context"] = (
response.source_nodes[0].node.text[:1000] + "..."
)
data_entry["response_data"] = response_struct
train_dataset.append(data_entry)
from llama_index.llms.openai import OpenAI
from llama_index.finetuning.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from llama_index.core.evaluation import CorrectnessEvaluator
finetuning_handler = OpenAIFineTuningHandler()
callback_manager = CallbackManager([finetuning_handler])
gpt_4_llm = OpenAI(
temperature=0, model="gpt-4", callback_manager=callback_manager
)
gpt4_judge = CorrectnessEvaluator(llm=gpt_4_llm)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
llm = OpenAI(model="gpt-4")
splitter = SentenceSplitter(chunk_size=1024)
nodes = splitter.get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(
nodes=nodes,
storage_context=storage_context,
)
retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
from llama_index.core.response.notebook_utils import display_source_node
nodes = retriever.retrieve("What happened at Viaweb and Interleaf?")
for node in nodes:
display_source_node(node)
nodes = retriever.retrieve("What did Paul Graham do after RISD?")
for node in nodes:
display_source_node(node)
from llama_index.core.tools import RetrieverTool
vector_retriever = VectorIndexRetriever(index)
bm25_retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
retriever_tools = [
RetrieverTool.from_defaults(
retriever=vector_retriever,
description="Useful in most cases",
),
RetrieverTool.from_defaults(
retriever=bm25_retriever,
description="Useful if searching about specific information",
),
]
from llama_index.core.retrievers import RouterRetriever
retriever = RouterRetriever.from_defaults(
retriever_tools=retriever_tools,
llm=llm,
select_multi=True,
)
nodes = retriever.retrieve(
"Can you give me all the context regarding the author's life?"
)
for node in nodes:
display_source_node(node)
get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf')
from llama_index.core import (
VectorStoreIndex,
StorageContext,
SimpleDirectoryReader,
Document,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
documents = SimpleDirectoryReader(
input_files=["IPCC_AR6_WGII_Chapter03.pdf"]
).load_data()
llm = OpenAI(model="gpt-3.5-turbo")
splitter = SentenceSplitter(chunk_size=256)
nodes = splitter.get_nodes_from_documents(
[Document(text=documents[0].get_content()[:1000000])]
)
storage_context = StorageContext.from_defaults()
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import os
os.environ[
"PINECONE_API_KEY"
] = "<Your Pinecone API key, from app.pinecone.io>"
os.environ["OPENAI_API_KEY"] = "sk-..."
from pinecone import Pinecone
from pinecone import ServerlessSpec
api_key = os.environ["PINECONE_API_KEY"]
pc = Pinecone(api_key=api_key)
pc.create_index(
"quickstart-index",
dimension=1536,
metric="euclidean",
spec=ServerlessSpec(cloud="aws", region="us-west-2"),
)
pinecone_index = pc.Index("quickstart-index")
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core.schema import TextNode
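# Sample nodes with per-node metadata (author/director, theme, year) that Pinecone can filter on.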
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"author": "Harper Lee",
"theme": "Mafia",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index, namespace="test_05_14"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
get_ipython().run_line_magic('pip', 'install llama-index-llms-replicate')
get_ipython().system('pip install llama-index')
import os
os.environ["REPLICATE_API_TOKEN"] = "<your API key>"
from llama_index.llms.replicate import Replicate
llm = Replicate(
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5"
)
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import (
FixedRecencyPostprocessor,
EmbeddingRecencyPostprocessor,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import StorageContext
def get_file_metadata(file_name: str):
"""Get file metadata."""
if "v1" in file_name:
return {"date": "2020-01-01"}
elif "v2" in file_name:
return {"date": "2020-02-03"}
elif "v3" in file_name:
return {"date": "2022-04-12"}
else:
raise ValueError("invalid file")
documents = SimpleDirectoryReader(
input_files=[
"test_versioned_data/paul_graham_essay_v1.txt",
"test_versioned_data/paul_graham_essay_v2.txt",
"test_versioned_data/paul_graham_essay_v3.txt",
],
file_metadata=get_file_metadata,
).load_data()
from llama_index.core import Settings
Settings.text_splitter = SentenceSplitter(chunk_size=512)
nodes = Settings.text_splitter.get_nodes_from_documents(documents)
docstore = SimpleDocumentStore()
docstore.add_documents(nodes)
storage_context = StorageContext.from_defaults(docstore=docstore)
print(documents[2].get_text())
index = VectorStoreIndex(nodes, storage_context=storage_context)
node_postprocessor = FixedRecencyPostprocessor()
get_ipython().run_line_magic('pip', 'install llama-index-question-gen-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from IPython.display import Markdown, display
def display_prompt_dict(prompts_dict):
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>"
display(Markdown(text_md))
print(p.get_template())
display(Markdown("<br><br>"))
from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector
from llama_index.core.selectors import (
PydanticMultiSelector,
PydanticSingleSelector,
)
selector = LLMMultiSelector.from_defaults()
from llama_index.core.tools import ToolMetadata
tool_choices = [
ToolMetadata(
name="covid_nyt",
description=("This tool contains a NYT news article about COVID-19"),
),
ToolMetadata(
name="covid_wiki",
description=("This tool contains the Wikipedia page about COVID-19"),
),
ToolMetadata(
name="covid_tesla",
description=("This tool contains the Wikipedia page about apples"),
),
]
display_prompt_dict(selector.get_prompts())
selector_result = selector.select(
tool_choices, query="Tell me more about COVID-19"
)
selector_result.selections
from llama_index.core import PromptTemplate
from llama_index.llms.openai import OpenAI
query_gen_str = """\
You are a helpful assistant that generates multiple search queries based on a \
single input query. Generate {num_queries} search queries, one on each line, \
related to the following input query:
Query: {query}
Queries:
"""
query_gen_prompt = PromptTemplate(query_gen_str)
llm = OpenAI(model="gpt-3.5-turbo")
def generate_queries(query: str, llm, num_queries: int = 4):
response = llm.predict(
query_gen_prompt, num_queries=num_queries, query=query
)
queries = response.split("\n")
queries_str = "\n".join(queries)
print(f"Generated queries:\n{queries_str}")
return queries
queries = generate_queries("What happened at Interleaf and Viaweb?", llm)
queries
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.llms.openai import OpenAI
hyde = HyDEQueryTransform(include_original=True)
llm = OpenAI(model="gpt-3.5-turbo")
query_bundle = hyde.run("What is Bel?")
query_bundle.custom_embedding_strs
from llama_index.core.question_gen import LLMQuestionGenerator
from llama_index.question_gen.openai import OpenAIQuestionGenerator
from llama_index.llms.openai import OpenAI
llm = OpenAI()
question_gen = OpenAIQuestionGenerator.from_defaults(llm=llm)
display_prompt_dict(question_gen.get_prompts())
from llama_index.core.tools import ToolMetadata
tool_choices = [
ToolMetadata(
name="uber_2021_10k",
description=(
"Provides information about Uber financials for year 2021"
),
),
ToolMetadata(
name="lyft_2021_10k",
description=(
"Provides information about Lyft financials for year 2021"
),
),
]
from llama_index.core import QueryBundle
query_str = "Compare and contrast Uber and Lyft"
choices = question_gen.generate(tool_choices, QueryBundle(query_str=query_str))
choices
from llama_index.core.agent import ReActChatFormatter
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.tools import FunctionTool
from llama_index.core.llms import ChatMessage
def execute_sql(sql: str) -> str:
"""Given a SQL input string, execute it."""
return f"Executed {sql}"
def add(a: int, b: int) -> int:
"""Add two numbers."""
return a + b
tool1 = FunctionTool.from_defaults(fn=execute_sql)
tool2 = FunctionTool.from_defaults(fn=add)
tools = [tool1, tool2]
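# The formatter renders tools and chat history into a ReAct prompt; the parser turns the LLM reply back into reasoning steps.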
chat_formatter = ReActChatFormatter()
output_parser = ReActOutputParser()
get_ipython().system('pip install llama-index llama-hub rank-bm25')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system('wget "https://www.dropbox.com/s/f6bmb19xdg0xedm/paul_graham_essay.txt?dl=1" -O paul_graham_essay.txt')
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SimpleNodeParser
reader = SimpleDirectoryReader(input_files=["paul_graham_essay.txt"])
documents = reader.load_data()
node_parser = SimpleNodeParser.from_defaults()
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.2)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(vector_store_query_mode="mmr")
response = query_engine.query("What did the author do growing up?")
print(response)
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine_with_threshold = index.as_query_engine(
vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.2}
)
response = query_engine_with_threshold.query(
"What did the author do growing up?"
)
print(response)
index1 = VectorStoreIndex.from_documents(documents)
query_engine_no_mmr = index1.as_query_engine()
response_no_mmr = query_engine_no_mmr.query(
"What did the author do growing up?"
)
index2 = VectorStoreIndex.from_documents(documents)
query_engine_with_high_threshold = index2.as_query_engine(
vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.8}
)
response_high_threshold = query_engine_with_high_threshold.query(
"What did the author do growing up?"
)
index3 = VectorStoreIndex.from_documents(documents)
query_engine_with_low_threshold = index3.as_query_engine(
vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.2}
)
response_low_threshold = query_engine_with_low_threshold.query(
"What did the author do growing up?"
)
print(
"Scores without MMR ",
[node.score for node in response_no_mmr.source_nodes],
)
print(
"Scores with MMR and a threshold of 0.8 ",
[node.score for node in response_high_threshold.source_nodes],
)
print(
"Scores with MMR and a threshold of 0.2 ",
[node.score for node in response_low_threshold.source_nodes],
)
documents = SimpleDirectoryReader("../data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(
documents,
)
retriever = index.as_retriever(
vector_store_query_mode="mmr",
similarity_top_k=3,
vector_store_kwargs={"mmr_threshold": 0.1},
)
nodes = retriever.retrieve(
"What did the author do during his time in Y Combinator?"
)
from llama_index.core.response.notebook_utils import display_source_node
for n in nodes:
display_source_node(n, source_length=1000)
retriever = index.as_retriever(
vector_store_query_mode="mmr",
similarity_top_k=3,
vector_store_kwargs={"mmr_threshold": 0.5},
)
nodes = retriever.retrieve(
"What did the author do during his time in Y Combinator?"
)
for n in nodes:
    display_source_node(n, source_length=1000)
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
Settings.callback_manager = callback_manager
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.tools import QueryEngineTool
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["albums", "tracks", "artists"],
verbose=True,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query"
),
)
from llama_index.core.query_pipeline import QueryPipeline as QP
qp = QP(verbose=True)
from llama_index.core.agent.react.types import (
ActionReasoningStep,
ObservationReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.agent import Task, AgentChatResponse
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
CustomAgentComponent,
QueryComponent,
ToolRunnerComponent,
)
from llama_index.core.llms import MessageRole
from typing import Dict, Any, Optional, Tuple, List, cast
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]:
"""Agent input function.
Returns:
A Dictionary of output keys and values. If you are specifying
src_key when defining links between this component and other
components, make sure the src_key matches the specified output_key.
"""
if "current_reasoning" not in state:
state["current_reasoning"] = []
reasoning_step = ObservationReasoningStep(observation=task.input)
state["current_reasoning"].append(reasoning_step)
return {"input": task.input}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core.agent import ReActChatFormatter
from llama_index.core.query_pipeline import InputComponent, Link
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool
def react_prompt_fn(
task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool]
) -> List[ChatMessage]:
chat_formatter = ReActChatFormatter()
return chat_formatter.format(
tools,
chat_history=task.memory.get() + state["memory"].get_all(),
current_reasoning=state["current_reasoning"],
)
react_prompt_component = AgentFnComponent(
fn=react_prompt_fn, partial_dict={"tools": [sql_tool]}
)
from typing import Set, Optional
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.llms import ChatResponse
from llama_index.core.agent.types import Task
def parse_react_output_fn(
task: Task, state: Dict[str, Any], chat_response: ChatResponse
):
"""Parse ReAct output into a reasoning step."""
output_parser = ReActOutputParser()
reasoning_step = output_parser.parse(chat_response.message.content)
return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step}
parse_react_output = AgentFnComponent(fn=parse_react_output_fn)
def run_tool_fn(
task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep
):
"""Run tool and process tool output."""
tool_runner_component = ToolRunnerComponent(
[sql_tool], callback_manager=task.callback_manager
)
tool_output = tool_runner_component.run_component(
tool_name=reasoning_step.action,
tool_input=reasoning_step.action_input,
)
observation_step = ObservationReasoningStep(observation=str(tool_output))
state["current_reasoning"].append(observation_step)
return {"response_str": observation_step.get_content(), "is_done": False}
run_tool = AgentFnComponent(fn=run_tool_fn)
def process_response_fn(
task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep
):
"""Process response."""
state["current_reasoning"].append(response_step)
response_str = response_step.response
state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER))
state["memory"].put(
ChatMessage(content=response_str, role=MessageRole.ASSISTANT)
)
return {"response_str": response_str, "is_done": True}
process_response = AgentFnComponent(fn=process_response_fn)
def process_agent_response_fn(
task: Task, state: Dict[str, Any], response_dict: dict
):
"""Process agent response."""
return (
AgentChatResponse(response_dict["response_str"]),
response_dict["is_done"],
)
process_agent_response = AgentFnComponent(fn=process_agent_response_fn)
from llama_index.core.query_pipeline import QueryPipeline as QP
from llama_index.llms.openai import OpenAI
qp.add_modules(
{
"agent_input": agent_input_component,
"react_prompt": react_prompt_component,
"llm": OpenAI(model="gpt-4-1106-preview"),
"react_output_parser": parse_react_output,
"run_tool": run_tool,
"process_response": process_response,
"process_agent_response": process_agent_response,
}
)
qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"])
qp.add_link(
"react_output_parser",
"run_tool",
condition_fn=lambda x: not x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link(
"react_output_parser",
"process_response",
condition_fn=lambda x: x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link("process_response", "process_agent_response")
qp.add_link("run_tool", "process_agent_response")
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.clean_dag)
net.show("agent_dag.html")
from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner
from llama_index.core.callbacks import CallbackManager
agent_worker = QueryPipelineAgentWorker(qp)
agent = AgentRunner(
agent_worker, callback_manager=CallbackManager([]), verbose=True
)
task = agent.create_task(
"What are some tracks from the artist AC/DC? Limit it to 3"
)
step_output = agent.run_step(task.task_id)
step_output = agent.run_step(task.task_id)
step_output.is_last
response = agent.finalize_response(task.task_id)
print(str(response))
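# Hedged generalization (an assumption, not from the original snippet): instead of
# calling run_step a fixed number of times, loop until the step output reports that
# it is the last step.
task = agent.create_task(
    "What are some tracks from the artist AC/DC? Limit it to 3"
)
step_output = agent.run_step(task.task_id)
while not step_output.is_last:
    step_output = agent.run_step(task.task_id)
response = agent.finalize_response(task.task_id)
print(str(response))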
agent.reset()
response = agent.chat(
"What are some tracks from the artist AC/DC? Limit it to 3"
)
print(str(response))
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-1106-preview")
from llama_index.core.agent import Task, AgentChatResponse
from typing import Dict, Any
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
)
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict:
"""Agent input function."""
if "convo_history" not in state:
state["convo_history"] = []
state["count"] = 0
state["convo_history"].append(f"User: {task.input}")
convo_history_str = "\n".join(state["convo_history"]) or "None"
return {"input": task.input, "convo_history": convo_history_str}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core import PromptTemplate
retry_prompt_str = """\
You are trying to generate a proper natural language query given a user input.
This query will then be interpreted by a downstream text-to-SQL agent which
will convert the query to a SQL statement. If the agent triggers an error,
then that will be reflected in the current conversation history (see below).
If the conversation history is None, use the user input. If it's not None,
generate a new SQL query that avoids the problems of the previous SQL query.
Input: {input}
Convo history (failed attempts):
{convo_history}
New input: """
retry_prompt = PromptTemplate(retry_prompt_str)
from llama_index.core import Response
from typing import Tuple
validate_prompt_str = """\
Given the user query, validate whether the inferred SQL query and response from executing the query is correct and answers the query.
Answer with YES or NO.
Query: {input}
Inferred SQL query: {sql_query}
SQL Response: {sql_response}
Result: """
validate_prompt = PromptTemplate(validate_prompt_str)
MAX_ITER = 3
def agent_output_fn(
task: Task, state: Dict[str, Any], output: Response
) -> Tuple[AgentChatResponse, bool]:
"""Agent output component."""
print(f"> Inferred SQL Query: {output.metadata['sql_query']}")
print(f"> SQL Response: {str(output)}")
state["convo_history"].append(
f"Assistant (inferred SQL query): {output.metadata['sql_query']}"
)
state["convo_history"].append(f"Assistant (response): {str(output)}")
validate_prompt_partial = validate_prompt.as_query_component(
partial={
"sql_query": output.metadata["sql_query"],
"sql_response": str(output),
}
)
    qp = QP(chain=[validate_prompt_partial, llm])
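    # Hedged completion of this truncated function (a sketch, not the original
    # notebook code): run the validation prompt, then stop if the LLM answers YES
    # or the retry budget is exhausted; otherwise signal the agent to retry.
    validate_output = qp.run(input=task.input)
    state["count"] += 1
    is_done = False
    if state["count"] >= MAX_ITER:
        is_done = True
    if "YES" in str(validate_output).upper():
        is_done = True
    return AgentChatResponse(response=str(output)), is_done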
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-4")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston"]
from pathlib import Path
import requests
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
data_path = Path("data")
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
city_docs = {}
for wiki_title in wiki_titles:
city_docs[wiki_title] = SimpleDirectoryReader(
input_files=[f"data/{wiki_title}.txt"]
).load_data()
from llama_index.core import VectorStoreIndex
from llama_index.agent.openai import OpenAIAgent
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core import VectorStoreIndex
tool_dict = {}
for wiki_title in wiki_titles:
vector_index = VectorStoreIndex.from_documents(
city_docs[wiki_title],
)
vector_query_engine = vector_index.as_query_engine(llm=llm)
vector_tool = QueryEngineTool(
query_engine=vector_query_engine,
metadata=ToolMetadata(
name=wiki_title,
description=("Useful for questions related to" f" {wiki_title}"),
),
)
tool_dict[wiki_title] = vector_tool
from llama_index.core import VectorStoreIndex
from llama_index.core.objects import ObjectIndex, SimpleToolNodeMapping
tool_mapping = SimpleToolNodeMapping.from_objects(list(tool_dict.values()))
tool_index = ObjectIndex.from_objects(
list(tool_dict.values()),
tool_mapping,
VectorStoreIndex,
)
tool_retriever = tool_index.as_retriever(similarity_top_k=1)
from llama_index.core.llms import ChatMessage
from llama_index.core import ChatPromptTemplate
from typing import List
GEN_SYS_PROMPT_STR = """\
Task information is given below.
Given the task, please generate a system prompt for an OpenAI-powered bot to solve this task:
{task} \
"""
gen_sys_prompt_messages = [
ChatMessage(
role="system",
content="You are helping to build a system prompt for another bot.",
),
    ChatMessage(role="user", content=GEN_SYS_PROMPT_STR),
]
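# Hedged sketch of the next step (assumed usage, not from the original snippet):
# wrap the messages in a ChatPromptTemplate so the {task} placeholder can be filled
# in when asking the LLM to draft a system prompt. The task string is illustrative.
gen_sys_prompt_tmpl = ChatPromptTemplate(gen_sys_prompt_messages)
fmt_messages = gen_sys_prompt_tmpl.format_messages(
    task="Build an agent that answers questions about Toronto."
)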
get_ipython().run_line_magic('pip', 'install llama-index-llms-predibase')
get_ipython().system('pip install llama-index --quiet')
get_ipython().system('pip install predibase --quiet')
get_ipython().system('pip install sentence-transformers --quiet')
import os
os.environ["PREDIBASE_API_TOKEN"] = "{PREDIBASE_API_TOKEN}"
from llama_index.llms.predibase import PredibaseLLM
llm = PredibaseLLM(
model_name="llama-2-13b", temperature=0.3, max_new_tokens=512
)
result = llm.complete("Can you recommend me a nice dry white wine?")
print(result)
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.embeddings import resolve_embed_model
from llama_index.core.node_parser import SentenceSplitter
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
llm = PredibaseLLM(
model_name="llama-2-13b",
temperature=0.3,
max_new_tokens=400,
context_window=1024,
)
embed_model = resolve_embed_model("local:BAAI/bge-small-en-v1.5")
splitter = SentenceSplitter(chunk_size=1024)
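# Hedged continuation (standard LlamaIndex wiring; the parameter names are assumed,
# not taken from this snippet): index the essay with the local embedding model and
# query it with the Predibase-hosted LLM.
index = VectorStoreIndex.from_documents(
    documents,
    transformations=[splitter],
    embed_model=embed_model,
)
query_engine = index.as_query_engine(llm=llm)
response = query_engine.query("What did the author do growing up?")
print(response)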
get_ipython().run_line_magic('pip', 'install llama-index-llms-replicate')
get_ipython().system('pip install llama-index')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
os.environ["REPLICATE_API_TOKEN"] = "YOUR_REPLICATE_TOKEN"
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from IPython.display import Markdown, display
from llama_index.llms.replicate import Replicate
from llama_index.core.llms.llama_utils import (
messages_to_prompt,
completion_to_prompt,
)
LLAMA_13B_V2_CHAT = "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5"
def custom_completion_to_prompt(completion: str) -> str:
return completion_to_prompt(
completion,
system_prompt=(
"You are a Q&A assistant. Your goal is to answer questions as "
"accurately as possible is the instructions and context provided."
),
)
llm = Replicate(
model=LLAMA_13B_V2_CHAT,
temperature=0.01,
context_window=4096,
completion_to_prompt=custom_completion_to_prompt,
messages_to_prompt=messages_to_prompt,
)
from llama_index.core import Settings
Settings.llm = llm
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
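# Minimal usage sketch (the question is illustrative): query the index with the
# Replicate-backed LLM configured above.
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(str(response)))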
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-txtai')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import txtai
txtai_index = txtai.ann.ANNFactory.create({"backend": "numpy"})
from llama_index.core import (
SimpleDirectoryReader,
load_index_from_storage,
VectorStoreIndex,
StorageContext,
)
from llama_index.vector_stores.txtai import TxtaiVectorStore
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = TxtaiVectorStore(txtai_index=txtai_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
index.storage_context.persist()
vector_store = TxtaiVectorStore.from_persist_dir("./storage")
storage_context = StorageContext.from_defaults(
vector_store=vector_store, persist_dir="./storage"
)
index = load_index_from_storage(storage_context=storage_context)
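# Hedged usage sketch: query the index reloaded from disk (the question is
# illustrative).
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(str(response)))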
from llama_index.core import Document, VectorStoreIndex
from llama_index.core.retrievers import VectorIndexRetriever
documents = [
Document(
text="A group of penguins, known as a 'waddle' on land, shuffled across the Antarctic ice, their tuxedo-like plumage standing out against the snow."
),
Document(
text="Emperor penguins, the tallest of all penguin species, can dive deeper than any other bird, reaching depths of over 500 meters."
),
Document(
text="Penguins' black and white coloring is a form of camouflage called countershading; from above, their black back blends with the ocean depths, and from below, their white belly matches the bright surface."
),
Document(
text="Despite their upright stance, penguins are birds that cannot fly; their wings have evolved into flippers, making them expert swimmers."
),
Document(
text="The fastest species, the Gentoo penguin, can swim up to 36 kilometers per hour, using their flippers and streamlined bodies to slice through the water."
),
Document(
text="Penguins are social birds; many species form large colonies for breeding, which can number in the tens of thousands."
),
Document(
text="Intriguingly, penguins have excellent hearing and rely on distinct calls to identify their mates and chicks amidst the noisy colonies."
),
Document(
text="The smallest penguin species, the Little Blue Penguin, stands just about 40 cm tall and is found along the coastlines of southern Australia and New Zealand."
),
Document(
text="During the breeding season, male Emperor penguins endure the harsh Antarctic winter for months, fasting and incubating their eggs, while females hunt at sea."
),
Document(
text="Penguins consume a variety of seafood; their diet mainly consists of fish, squid, and krill, which they catch on their diving expeditions."
),
]
index = VectorStoreIndex.from_documents(documents)
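# Hedged sketch: VectorIndexRetriever is imported above but never used; this
# exercises it over the penguin documents (the query and top_k value are arbitrary).
retriever = VectorIndexRetriever(index=index, similarity_top_k=3)
nodes = retriever.retrieve("How fast can penguins swim?")
for node in nodes:
    print(node.get_content())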
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.response.pprint_utils import pprint_response
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.query_engine import SubQuestionQueryEngine
import os
os.environ["OPENAI_API_KEY"] = "OPENAI_API_KEY"
from llama_index.core import Settings
Settings.llm = OpenAI(temperature=0.2, model="gpt-3.5-turbo")
get_ipython().system("mkdir -p 'data/10q/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_march_2022.pdf' -O 'data/10q/uber_10q_march_2022.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_june_2022.pdf' -O 'data/10q/uber_10q_june_2022.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_sept_2022.pdf' -O 'data/10q/uber_10q_sept_2022.pdf'")
march_2022 = SimpleDirectoryReader(
input_files=["./data/10q/uber_10q_march_2022.pdf"]
).load_data()
june_2022 = SimpleDirectoryReader(
input_files=["./data/10q/uber_10q_june_2022.pdf"]
).load_data()
sept_2022 = SimpleDirectoryReader(
input_files=["./data/10q/uber_10q_sept_2022.pdf"]
).load_data()
march_index = VectorStoreIndex.from_documents(march_2022)
june_index = VectorStoreIndex.from_documents(june_2022)
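# Hedged continuation (the tool names, descriptions, and question below are
# assumptions that mirror standard SubQuestionQueryEngine usage):
sept_index = VectorStoreIndex.from_documents(sept_2022)
query_engine_tools = [
    QueryEngineTool(
        query_engine=march_index.as_query_engine(),
        metadata=ToolMetadata(
            name="march_22",
            description="Uber 10-Q filing for the quarter ending March 2022",
        ),
    ),
    QueryEngineTool(
        query_engine=june_index.as_query_engine(),
        metadata=ToolMetadata(
            name="june_22",
            description="Uber 10-Q filing for the quarter ending June 2022",
        ),
    ),
    QueryEngineTool(
        query_engine=sept_index.as_query_engine(),
        metadata=ToolMetadata(
            name="sept_22",
            description="Uber 10-Q filing for the quarter ending September 2022",
        ),
    ),
]
sub_question_engine = SubQuestionQueryEngine.from_defaults(
    query_engine_tools=query_engine_tools
)
response = sub_question_engine.query(
    "Analyze Uber revenue growth over the latest two quarters"
)
pprint_response(response, show_source=True)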
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import camelot
from llama_index.core import VectorStoreIndex
from llama_index.core.query_engine import PandasQueryEngine
from llama_index.core.schema import IndexNode
from llama_index.llms.openai import OpenAI
from llama_index.readers.file import PyMuPDFReader
from typing import List
import os
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
file_path = "billionaires_page.pdf"
reader = PyMuPDFReader()
docs = reader.load(file_path)
def get_tables(path: str, pages: List[int]):
table_dfs = []
for page in pages:
table_list = camelot.read_pdf(path, pages=str(page))
table_df = table_list[0].df
table_df = (
table_df.rename(columns=table_df.iloc[0])
.drop(table_df.index[0])
.reset_index(drop=True)
)
table_dfs.append(table_df)
return table_dfs
table_dfs = get_tables(file_path, pages=[3, 25])
table_dfs[0]
table_dfs[1]
llm = OpenAI(model="gpt-4")
df_query_engines = [
    PandasQueryEngine(table_df, llm=llm) for table_df in table_dfs
]
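# Hedged usage sketch: run one of the per-table Pandas engines (the question assumes
# the first extracted table is the billionaires list).
response = df_query_engines[0].query(
    "What's the net worth of the second richest billionaire in 2023?"
)
print(str(response))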
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-rankgpt-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-packs-infer-retrieve-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import datasets
dataset = datasets.load_dataset("BioDEX/BioDEX-ICSR")
dataset
from llama_index.core import get_tokenizer
import re
from typing import Set, List
tokenizer = get_tokenizer()
sample_size = 5
def get_reactions_row(raw_target: str) -> List[str]:
"""Get reactions from a single row."""
reaction_pattern = re.compile(r"reactions:\s*(.*)")
reaction_match = reaction_pattern.search(raw_target)
if reaction_match:
reactions = reaction_match.group(1).split(",")
reactions = [r.strip().lower() for r in reactions]
else:
reactions = []
return reactions
def get_reactions_set(dataset) -> Set[str]:
"""Get set of all reactions."""
reactions = set()
for data in dataset["train"]:
reactions.update(set(get_reactions_row(data["target"])))
return reactions
def get_samples(dataset, sample_size: int = 5):
"""Get processed sample.
Contains source text and also the reaction label.
Parse reaction text to specifically extract reactions.
"""
samples = []
for idx, data in enumerate(dataset["train"]):
if idx >= sample_size:
break
text = data["fulltext_processed"]
raw_target = data["target"]
reactions = get_reactions_row(raw_target)
samples.append({"text": text, "reactions": reactions})
return samples
from llama_index.packs.infer_retrieve_rerank import InferRetrieveRerankPack
from llama_index.core.llama_pack import download_llama_pack
InferRetrieveRerankPack = download_llama_pack(
"InferRetrieveRerankPack",
"./irr_pack",
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-16k")
pred_context = """\
The output predictions should be a list of comma-separated adverse \
drug reactions. \
"""
reranker_top_n = 10
pack = InferRetrieveRerankPack(
get_reactions_set(dataset),
llm=llm,
pred_context=pred_context,
reranker_top_n=reranker_top_n,
verbose=True,
)
samples = get_samples(dataset, sample_size=5)
pred_reactions = pack.run(inputs=[s["text"] for s in samples])
gt_reactions = [s["reactions"] for s in samples]
pred_reactions[2]
gt_reactions[2]
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.llms import LLM
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.postprocessor.rankgpt_rerank import RankGPTRerank
from llama_index.core.output_parsers import ChainableOutputParser
from typing import List
import random
all_reactions = get_reactions_set(dataset)
random.sample(list(all_reactions), 5)  # random.sample needs a sequence, not a set
from llama_index.core.schema import TextNode
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core import VectorStoreIndex
reaction_nodes = [TextNode(text=r) for r in all_reactions]
pipeline = IngestionPipeline(transformations=[OpenAIEmbedding()])
reaction_nodes = await pipeline.arun(documents=reaction_nodes)
index = VectorStoreIndex(reaction_nodes)
reaction_nodes[0].embedding
reaction_retriever = index.as_retriever(similarity_top_k=2)
nodes = reaction_retriever.retrieve("abdominal")
print([n.get_content() for n in nodes])
infer_prompt_str = """\
Your job is to output a list of predictions given context from a given piece of text. The text context
and information regarding the set of valid predictions are given below.
Return the predictions as a comma-separated list of strings.
Text Context:
{doc_context}
Prediction Info:
{pred_context}
Predictions: """
infer_prompt = PromptTemplate(infer_prompt_str)
class PredsOutputParser(ChainableOutputParser):
"""Predictions output parser."""
def parse(self, output: str) -> List[str]:
"""Parse predictions."""
tokens = output.split(",")
return [t.strip() for t in tokens]
preds_output_parser = PredsOutputParser()
rerank_str = """\
Given a piece of text, rank the {num} labels above based on their relevance \
to this piece of text. The labels \
should be listed in descending order using identifiers. \
The most relevant labels should be listed first. \
The output format should be [] > [], e.g., [1] > [2]. \
Only respond with the ranking results, \
do not say any word or explain. \
Here is a given piece of text: {query}.
"""
rerank_prompt = PromptTemplate(rerank_str)
def infer_retrieve_rerank(
query: str,
retriever: BaseRetriever,
llm: LLM,
pred_context: str,
reranker_top_n: int = 3,
):
"""Infer retrieve rerank."""
infer_prompt_c = infer_prompt.as_query_component(
partial={"pred_context": pred_context}
)
    infer_pipeline = QueryPipeline(chain=[infer_prompt_c, llm, preds_output_parser])
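    # Hedged sketch of the remaining steps (assumed, not copied from the pack):
    # infer candidate labels, retrieve the closest valid labels for each candidate,
    # then rerank the pooled candidates against the source text.
    preds = infer_pipeline.run(doc_context=query)
    candidate_nodes = []
    for pred in preds:
        candidate_nodes.extend(retriever.retrieve(pred))
    reranker = RankGPTRerank(llm=llm, top_n=reranker_top_n)
    reranked_nodes = reranker.postprocess_nodes(candidate_nodes, query_str=query)
    return [n.get_content() for n in reranked_nodes]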
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().system('pip install llama-index qdrant-client pypdf "transformers[torch]"')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
get_ipython().system("mkdir -p 'data/'")
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/").load_data()
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.core import Settings
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
client = QdrantClient(path="./qdrant_data")
vector_store = QdrantVectorStore(
"llama2_paper", client=client, enable_hybrid=True, batch_size=20
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
Settings.chunk_size = 512
index = VectorStoreIndex.from_documents(
documents,
storage_context=storage_context,
)
query_engine = index.as_query_engine(
similarity_top_k=2, sparse_top_k=12, vector_store_query_mode="hybrid"
)
from IPython.display import display, Markdown
response = query_engine.query(
"How was Llama2 specifically trained differently from Llama1?"
)
display(Markdown(str(response)))
print(len(response.source_nodes))
from IPython.display import display, Markdown
query_engine = index.as_query_engine(
similarity_top_k=2,
)
response = query_engine.query(
"How was Llama2 specifically trained differently from Llama1?"
)
display(Markdown(str(response)))
import nest_asyncio
nest_asyncio.apply()
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.core import Settings
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import AsyncQdrantClient
aclient = AsyncQdrantClient(path="./qdrant_data_async")
vector_store = QdrantVectorStore(
collection_name="llama2_paper",
aclient=aclient,
enable_hybrid=True,
batch_size=20,
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
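# Hedged continuation (mirrors the sync example above; use_async support for this
# setup is assumed): build the index with the async client and run an async hybrid
# query.
index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
    use_async=True,
)
query_engine = index.as_query_engine(
    similarity_top_k=2, sparse_top_k=12, vector_store_query_mode="hybrid"
)
response = await query_engine.aquery(
    "How was Llama2 specifically trained differently from Llama1?"
)
display(Markdown(str(response)))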
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=YOUR_OPENAI_KEY')
get_ipython().system('pip install llama-index pypdf')
get_ipython().system("mkdir -p 'data/'")
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-small-en")
llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
base_retriever = base_index.as_retriever(similarity_top_k=2)
retrievals = base_retriever.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for n in retrievals:
display_source_node(n, source_length=1500)
query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm)
response = query_engine_base.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
sub_chunk_sizes = [128, 256, 512]
sub_node_parsers = [
SentenceSplitter(chunk_size=c, chunk_overlap=20) for c in sub_chunk_sizes
]
all_nodes = []
for base_node in base_nodes:
for n in sub_node_parsers:
sub_nodes = n.get_nodes_from_documents([base_node])
sub_inodes = [
IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes
]
all_nodes.extend(sub_inodes)
original_node = IndexNode.from_text_node(base_node, base_node.node_id)
all_nodes.append(original_node)
all_nodes_dict = {n.node_id: n for n in all_nodes}
vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model)
vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2)
retriever_chunk = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_chunk},
node_dict=all_nodes_dict,
verbose=True,
)
nodes = retriever_chunk.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for node in nodes:
display_source_node(node, source_length=2000)
query_engine_chunk = RetrieverQueryEngine.from_args(retriever_chunk, llm=llm)
response = query_engine_chunk.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
from llama_index.core.extractors import (
SummaryExtractor,
QuestionsAnsweredExtractor,
)
extractors = [
SummaryExtractor(summaries=["self"], show_progress=True),
QuestionsAnsweredExtractor(questions=5, show_progress=True),
]
node_to_metadata = {}
for extractor in extractors:
metadata_dicts = extractor.extract(base_nodes)
for node, metadata in zip(base_nodes, metadata_dicts):
if node.node_id not in node_to_metadata:
node_to_metadata[node.node_id] = metadata
else:
node_to_metadata[node.node_id].update(metadata)
def save_metadata_dicts(path, data):
with open(path, "w") as fp:
json.dump(data, fp)
def load_metadata_dicts(path):
with open(path, "r") as fp:
data = json.load(fp)
return data
save_metadata_dicts("data/llama2_metadata_dicts.json", node_to_metadata)
metadata_dicts = load_metadata_dicts("data/llama2_metadata_dicts.json")
import copy
all_nodes = copy.deepcopy(base_nodes)
for node_id, metadata in node_to_metadata.items():
for val in metadata.values():
all_nodes.append(IndexNode(text=val, index_id=node_id))
all_nodes_dict = {n.node_id: n for n in all_nodes}
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo")
vector_index_metadata = VectorStoreIndex(all_nodes)
vector_retriever_metadata = vector_index_metadata.as_retriever(
similarity_top_k=2
)
retriever_metadata = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_metadata},
node_dict=all_nodes_dict,
verbose=False,
)
nodes = retriever_metadata.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for node in nodes:
display_source_node(node, source_length=2000)
query_engine_metadata = RetrieverQueryEngine.from_args(
retriever_metadata, llm=llm
)
response = query_engine_metadata.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
from llama_index.core.evaluation import (
generate_question_context_pairs,
EmbeddingQAFinetuneDataset,
)
from llama_index.llms.openai import OpenAI
import nest_asyncio
nest_asyncio.apply()
eval_dataset = generate_question_context_pairs(
base_nodes, OpenAI(model="gpt-3.5-turbo")
)
eval_dataset.save_json("data/llama2_eval_dataset.json")
eval_dataset = EmbeddingQAFinetuneDataset.from_json(
"data/llama2_eval_dataset.json"
)
import pandas as pd
from llama_index.core.evaluation import (
RetrieverEvaluator,
get_retrieval_results_df,
)
top_k = 10
def display_results(names, results_arr):
"""Display results from evaluate."""
hit_rates = []
mrrs = []
for name, eval_results in zip(names, results_arr):
metric_dicts = []
for eval_result in eval_results:
metric_dict = eval_result.metric_vals_dict
metric_dicts.append(metric_dict)
results_df = pd.DataFrame(metric_dicts)
hit_rate = results_df["hit_rate"].mean()
mrr = results_df["mrr"].mean()
hit_rates.append(hit_rate)
mrrs.append(mrr)
final_df = pd.DataFrame(
{"retrievers": names, "hit_rate": hit_rates, "mrr": mrrs}
)
display(final_df)
vector_retriever_chunk = vector_index_chunk.as_retriever(
similarity_top_k=top_k
)
retriever_chunk = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_chunk},
node_dict=all_nodes_dict,
verbose=True,
)
retriever_evaluator = RetrieverEvaluator.from_metric_names(
["mrr", "hit_rate"], retriever=retriever_chunk
)
results_chunk = await retriever_evaluator.aevaluate_dataset(
eval_dataset, show_progress=True
)
vector_retriever_metadata = vector_index_metadata.as_retriever(
similarity_top_k=top_k
)
retriever_metadata = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_metadata},
node_dict=all_nodes_dict,
verbose=True,
)
retriever_evaluator = RetrieverEvaluator.from_metric_names(
["mrr", "hit_rate"], retriever=retriever_metadata
)
results_metadata = await retriever_evaluator.aevaluate_dataset(
eval_dataset, show_progress=True
)
base_retriever = base_index.as_retriever(similarity_top_k=top_k)
retriever_evaluator = RetrieverEvaluator.from_metric_names(
["mrr", "hit_rate"], retriever=base_retriever
)
results_base = await retriever_evaluator.aevaluate_dataset(
eval_dataset, show_progress=True
)
full_results_df = get_retrieval_results_df(
    [
        "Base Retriever",
        "Retriever (Chunk References)",
        "Retriever (Metadata References)",
    ],
    [results_base, results_chunk, results_metadata],
)
display(full_results_df)
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-program-openai')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.program.openai import OpenAIPydanticProgram
from pydantic import BaseModel
from llama_index.llms.openai import OpenAI
from llama_index.finetuning.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from typing import List
class Song(BaseModel):
"""Data model for a song."""
title: str
length_seconds: int
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
songs: List[Song]
finetuning_handler = OpenAIFineTuningHandler()
callback_manager = CallbackManager([finetuning_handler])
llm = OpenAI(model="gpt-4", callback_manager=callback_manager)
prompt_template_str = """\
Generate an example album, with an artist and a list of songs. \
Using the movie {movie_name} as inspiration.\
"""
program = OpenAIPydanticProgram.from_defaults(
output_cls=Album,
prompt_template_str=prompt_template_str,
llm=llm,
verbose=False,
)
movie_names = [
"The Shining",
"The Departed",
"Titanic",
"Goodfellas",
"Pretty Woman",
"Home Alone",
"Caged Fury",
"Edward Scissorhands",
"Total Recall",
"Ghost",
"Tremors",
"RoboCop",
"Rocky V",
]
from tqdm.notebook import tqdm
for movie_name in tqdm(movie_names):
output = program(movie_name=movie_name)
print(output.json())
finetuning_handler.save_finetuning_events("mock_finetune_songs.jsonl")
get_ipython().system('cat mock_finetune_songs.jsonl')
from llama_index.finetuning import OpenAIFinetuneEngine
finetune_engine = OpenAIFinetuneEngine(
"gpt-3.5-turbo",
"mock_finetune_songs.jsonl",
validate_json=False, # openai validate json code doesn't support function calling yet
)
finetune_engine.finetune()
finetune_engine.get_current_job()
ft_llm = finetune_engine.get_finetuned_model(temperature=0.3)
ft_program = OpenAIPydanticProgram.from_defaults(
output_cls=Album,
prompt_template_str=prompt_template_str,
llm=ft_llm,
verbose=False,
)
ft_program(movie_name="Goodfellas")
get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pydantic import Field
from typing import List
class Citation(BaseModel):
"""Citation class."""
author: str = Field(
..., description="Inferred first author (usually last name"
)
year: int = Field(..., description="Inferred year")
desc: str = Field(
...,
description=(
"Inferred description from the text of the work that the author is"
" cited for"
),
)
class Response(BaseModel):
"""List of author citations.
Extracted over unstructured text.
"""
citations: List[Citation] = Field(
...,
description=(
"List of author citations (organized by author, year, and"
" description)."
),
)
from llama_index.readers.file import PyMuPDFReader
from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter
from pathlib import Path
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
metadata = {
"paper_title": "Llama 2: Open Foundation and Fine-Tuned Chat Models"
}
docs = [Document(text=doc_text, metadata=metadata)]
chunk_size = 1024
node_parser = SentenceSplitter(chunk_size=chunk_size)
nodes = node_parser.get_nodes_from_documents(docs)
len(nodes)
from llama_index.core import Settings
finetuning_handler = OpenAIFineTuningHandler()
callback_manager = CallbackManager([finetuning_handler])
Settings.chunk_size = chunk_size
gpt_4_llm = OpenAI(
model="gpt-4-0613", temperature=0.3, callback_manager=callback_manager
)
gpt_35_llm = OpenAI(
model="gpt-3.5-turbo-0613",
temperature=0.3,
callback_manager=callback_manager,
)
eval_llm = OpenAI(model="gpt-4-0613", temperature=0)
from llama_index.core.evaluation import DatasetGenerator
from llama_index.core import SummaryIndex
from llama_index.core import PromptTemplate
from tqdm.notebook import tqdm
from tqdm.asyncio import tqdm_asyncio
fp = open("data/qa_pairs.jsonl", "w")
question_gen_prompt = PromptTemplate(
"""
{query_str}
Context:
{context_str}
Questions:
"""
)
question_gen_query = """\
Snippets from a research paper are given below. They contain citations.
Please generate questions from the text asking about these citations.
For instance, here are some sample questions:
Which citations correspond to related works on transformer models?
Tell me about authors that worked on advancing RLHF.
Can you tell me citations corresponding to all computer vision works? \
"""
qr_pairs = []
node_questions_tasks = []
for idx, node in enumerate(nodes[:39]):
num_questions = 1 # change this number to increase number of nodes
dataset_generator = DatasetGenerator(
[node],
question_gen_query=question_gen_query,
text_question_template=question_gen_prompt,
llm=eval_llm,
metadata_mode="all",
num_questions_per_chunk=num_questions,
)
task = dataset_generator.agenerate_questions_from_nodes(num=num_questions)
node_questions_tasks.append(task)
node_questions_lists = await tqdm_asyncio.gather(*node_questions_tasks)
node_questions_lists
from llama_index.core import VectorStoreIndex
gpt4_index = VectorStoreIndex(nodes=nodes)
gpt4_query_engine = gpt4_index.as_query_engine(
output_cls=Response, similarity_top_k=1, llm=gpt_4_llm
)
from json import JSONDecodeError
for idx, node in enumerate(tqdm(nodes[:39])):
node_questions_0 = node_questions_lists[idx]
for question in node_questions_0:
try:
gpt4_query_engine.query(question)
except Exception as e:
print(f"Error for question {question}, {repr(e)}")
pass
finetuning_handler.save_finetuning_events("llama2_citation_events.jsonl")
from llama_index.finetuning import OpenAIFinetuneEngine
finetune_engine = OpenAIFinetuneEngine(
"gpt-3.5-turbo",
"llama2_citation_events.jsonl",
validate_json=False, # openai validate json code doesn't support function calling yet
)
finetune_engine.finetune()
finetune_engine.get_current_job()
ft_llm = finetune_engine.get_finetuned_model(temperature=0.3)
from llama_index.core import VectorStoreIndex
vector_index = VectorStoreIndex(nodes=nodes)
query_engine = vector_index.as_query_engine(
output_cls=Response, similarity_top_k=1, llm=ft_llm
)
base_index = VectorStoreIndex(nodes=nodes)
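# Hedged comparison sketch: give the baseline index the un-finetuned gpt-3.5 model
# so its structured citations can be compared with the finetuned engine above
# (the query string is illustrative).
base_query_engine = base_index.as_query_engine(
    output_cls=Response, similarity_top_k=1, llm=gpt_35_llm
)
query_str = "Which citations are mentioned in the section on RLHF?"
print(str(query_engine.query(query_str)))
print(str(base_query_engine.query(query_str)))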
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
)
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(response_mode="tree_summarize")
def display_prompt_dict(prompts_dict):
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>"
display(Markdown(text_md))
print(p.get_template())
display(Markdown("<br><br>"))
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
prompts_dict = query_engine.response_synthesizer.get_prompts()
display_prompt_dict(prompts_dict)
query_engine = index.as_query_engine(response_mode="compact")
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
response = query_engine.query("What did the author do growing up?")
print(str(response))
from llama_index.core import PromptTemplate
query_engine = index.as_query_engine(response_mode="tree_summarize")
new_summary_tmpl_str = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query in the style of a Shakespeare play.\n"
"Query: {query_str}\n"
"Answer: "
)
new_summary_tmpl = PromptTemplate(new_summary_tmpl_str)
query_engine.update_prompts(
{"response_synthesizer:summary_template": new_summary_tmpl}
)
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
response = query_engine.query("What did the author do growing up?")
print(str(response))
from llama_index.core.query_engine import (
RouterQueryEngine,
FLAREInstructQueryEngine,
)
from llama_index.core.selectors import LLMMultiSelector
from llama_index.core.evaluation import FaithfulnessEvaluator, DatasetGenerator
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.tools import QueryEngineTool
query_tool = QueryEngineTool.from_defaults(
query_engine=query_engine, description="test description"
)
router_query_engine = RouterQueryEngine.from_defaults([query_tool])
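# Hedged follow-up: composed engines expose the prompts of their sub-components, so
# the same helper can inspect the router's full prompt dictionary.
prompts_dict = router_query_engine.get_prompts()
display_prompt_dict(prompts_dict)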
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-replicate')
import os
OPENAI_API_TOKEN = "sk-<your-openai-api-token>"
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
REPLICATE_API_TOKEN = ""  # Your Replicate API token here
os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN
from pathlib import Path
input_image_path = Path("restaurant_images")
if not input_image_path.exists():
Path.mkdir(input_image_path)
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png')
from pydantic import BaseModel
class Restaurant(BaseModel):
"""Data model for an restaurant."""
restaurant: str
food: str
discount: str
price: str
rating: str
review: str
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core import SimpleDirectoryReader
image_documents = SimpleDirectoryReader("./restaurant_images").load_data()
openai_mm_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=1000
)
from PIL import Image
import matplotlib.pyplot as plt
imageUrl = "./restaurant_images/fried_chicken.png"
image = Image.open(imageUrl).convert("RGB")
plt.figure(figsize=(16, 5))
plt.imshow(image)
from llama_index.core.program import MultiModalLLMCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
prompt_template_str = """\
Can you summarize what is in the image \
and return the answer in JSON format \
"""
openai_program = MultiModalLLMCompletionProgram.from_defaults(
    output_parser=PydanticOutputParser(Restaurant),
    image_documents=image_documents,
    prompt_template_str=prompt_template_str,
    multi_modal_llm=openai_mm_llm,
    verbose=True,
)
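# Hedged usage sketch: running the program should return a Restaurant instance
# extracted from the image.
response = openai_program()
print(response)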
from llama_hub.openalex import OpenAlexReader
from llama_index.llms import OpenAI
from llama_index.query_engine import CitationQueryEngine
from llama_index import (
VectorStoreIndex,
ServiceContext,
)
from llama_index.response.notebook_utils import display_response
openalex_reader = OpenAlexReader(email="[email protected]")
query = "biases in large language models"
works = openalex_reader.load_data(query, full_text=False)
service_context = ServiceContext.from_defaults(
    llm=OpenAI(model="gpt-3.5-turbo", temperature=0)
)
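# Hedged continuation using the legacy API imported above (the index construction
# and CitationQueryEngine parameters are assumptions):
index = VectorStoreIndex.from_documents(works, service_context=service_context)
query_engine = CitationQueryEngine.from_args(index, similarity_top_k=10)
response = query_engine.query(
    "What are the known biases in large language models?"
)
display_response(response)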
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.node_parser import SentenceWindowNodeParser
from llama_index.core.node_parser import SentenceSplitter
node_parser = SentenceWindowNodeParser.from_defaults(
window_size=3,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
text_splitter = SentenceSplitter()
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
embed_model = HuggingFaceEmbedding(
model_name="sentence-transformers/all-mpnet-base-v2", max_length=512
)
from llama_index.core import Settings
Settings.llm = llm
Settings.embed_model = embed_model
Settings.text_splitter = text_splitter
get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf')
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader(
input_files=["./IPCC_AR6_WGII_Chapter03.pdf"]
).load_data()
nodes = node_parser.get_nodes_from_documents(documents)
base_nodes = text_splitter.get_nodes_from_documents(documents)
from llama_index.core import VectorStoreIndex
sentence_index = VectorStoreIndex(nodes)
base_index = VectorStoreIndex(base_nodes)
from llama_index.core.postprocessor import MetadataReplacementPostProcessor
query_engine = sentence_index.as_query_engine(
similarity_top_k=2,
node_postprocessors=[
MetadataReplacementPostProcessor(target_metadata_key="window")
],
)
window_response = query_engine.query(
"What are the concerns surrounding the AMOC?"
)
print(window_response)
window = window_response.source_nodes[0].node.metadata["window"]
sentence = window_response.source_nodes[0].node.metadata["original_text"]
print(f"Window: {window}")
print("------------------")
print(f"Original Sentence: {sentence}")
query_engine = base_index.as_query_engine(similarity_top_k=2)
vector_response = query_engine.query(
"What are the concerns surrounding the AMOC?"
)
print(vector_response)
query_engine = base_index.as_query_engine(similarity_top_k=5)
vector_response = query_engine.query(
"What are the concerns surrounding the AMOC?"
)
print(vector_response)
for source_node in window_response.source_nodes:
print(source_node.node.metadata["original_text"])
print("--------")
for node in vector_response.source_nodes:
print("AMOC mentioned?", "AMOC" in node.node.text)
print("--------")
print(vector_response.source_nodes[2].node.text)
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
import nest_asyncio
import random
nest_asyncio.apply()
len(base_nodes)
num_nodes_eval = 30
sample_eval_nodes = random.sample(base_nodes[:200], num_nodes_eval)
dataset_generator = DatasetGenerator(
sample_eval_nodes,
    llm=OpenAI(model="gpt-4"),
)
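# Hedged continuation: generate the evaluation dataset from the sampled nodes
# (async API assumed; nest_asyncio is applied above) and persist it to an
# arbitrary path.
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes()
eval_dataset.save_json("data/ipcc_eval_qr_dataset.json")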
get_ipython().run_line_magic('pip', 'install llama-index-llms-openrouter')
get_ipython().system('pip install llama-index')
from llama_index.llms.openrouter import OpenRouter
from llama_index.core.llms import ChatMessage
llm = OpenRouter(
api_key="<your-api-key>",
max_tokens=256,
context_window=4096,
model="gryphe/mythomax-l2-13b",
)
message = ChatMessage(role="user", content="Tell me a joke")
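# Minimal usage sketch: send the message and print the model's reply.
resp = llm.chat([message])
print(resp)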
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-kvstore-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-firestore')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
from llama_index.storage.kvstore.firestore import FirestoreKVStore
from llama_index.storage.docstore.firestore import FirestoreDocumentStore
from llama_index.storage.index_store.firestore import FirestoreIndexStore
kvstore = FirestoreKVStore()
storage_context = StorageContext.from_defaults(
docstore=FirestoreDocumentStore(kvstore),
    index_store=FirestoreIndexStore(kvstore),
)
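# Hedged continuation (mirrors the standard docstore demo): register the parsed
# nodes with the shared Firestore-backed docstore and build two indices on top.
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)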
get_ipython().run_line_magic('pip', 'install llama-index-llms-gradient')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().system('pip install llama-index gradientai -q')
import os
from llama_index.llms.gradient import GradientBaseModelLLM
from llama_index.finetuning import GradientFinetuneEngine
os.environ["GRADIENT_ACCESS_TOKEN"] = os.getenv("GRADIENT_API_KEY")
os.environ["GRADIENT_WORKSPACE_ID"] = "<insert_workspace_id>"
from pydantic import BaseModel
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler
from llama_index.llms.openai import OpenAI
from llama_index.llms.gradient import GradientBaseModelLLM
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
openai_handler = LlamaDebugHandler()
openai_callback = CallbackManager([openai_handler])
openai_llm = OpenAI(model="gpt-4", callback_manager=openai_callback)
gradient_handler = LlamaDebugHandler()
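# Hedged sketch: mirror the OpenAI debug setup above for the Gradient model
# (the base model slug and token limit are assumptions for illustration).
gradient_callback = CallbackManager([gradient_handler])
gradient_llm = GradientBaseModelLLM(
    base_model_slug="llama2-7b-chat",
    max_tokens=300,
    callback_manager=gradient_callback,
    is_chat_model=True,
)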
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=')
get_ipython().run_line_magic('env', 'BRAINTRUST_API_KEY=')
get_ipython().run_line_magic('env', 'TOKENIZERS_PARALLELISM=true')  # avoids a tokenizers warning message from Chroma
get_ipython().run_line_magic('pip', 'install -U llama_hub llama_index braintrust autoevals pypdf pillow transformers torch torchvision')
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-small-en")
llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
base_retriever = base_index.as_retriever(similarity_top_k=2)
retrievals = base_retriever.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for n in retrievals:
display_source_node(n, source_length=1500)
query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm)
response = query_engine_base.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
sub_chunk_sizes = [128, 256, 512]
sub_node_parsers = [SentenceSplitter(chunk_size=c) for c in sub_chunk_sizes]
all_nodes = []
for base_node in base_nodes:
for n in sub_node_parsers:
sub_nodes = n.get_nodes_from_documents([base_node])
sub_inodes = [
IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes
]
all_nodes.extend(sub_inodes)
original_node = IndexNode.from_text_node(base_node, base_node.node_id)
all_nodes.append(original_node)
all_nodes_dict = {n.node_id: n for n in all_nodes}
vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model)
vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2)
retriever_chunk = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_chunk},
node_dict=all_nodes_dict,
verbose=True,
)
nodes = retriever_chunk.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for node in nodes:
display_source_node(node, source_length=2000)
query_engine_chunk = RetrieverQueryEngine.from_args(retriever_chunk, llm=llm)
response = query_engine_chunk.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
from llama_index.core.extractors import (
SummaryExtractor,
QuestionsAnsweredExtractor,
)
extractors = [
    SummaryExtractor(summaries=["self"], show_progress=True),
    QuestionsAnsweredExtractor(questions=5, show_progress=True),
]
get_ipython().system("mkdir -p 'data/'")
get_ipython().system("curl 'https://arxiv.org/pdf/2307.09288.pdf' -o 'data/llama2.pdf'")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("data").load_data()
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core.llama_dataset import download_llama_dataset
rag_dataset, documents = download_llama_dataset(
"PaulGrahamEssayDataset", "./paul_graham"
)
rag_dataset.to_pandas()[:5]
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
import nest_asyncio
nest_asyncio.apply()
prediction_dataset = await rag_dataset.amake_predictions_with(
query_engine=query_engine, show_progress=True
)
prediction_dataset.to_pandas()[:5]
import tqdm
from llama_index.llms.openai import OpenAI
from llama_index.core.evaluation import (
CorrectnessEvaluator,
FaithfulnessEvaluator,
RelevancyEvaluator,
SemanticSimilarityEvaluator,
)
judges = {}
judges["correctness"] = CorrectnessEvaluator(
llm=OpenAI(temperature=0, model="gpt-4"),
)
judges["relevancy"] = RelevancyEvaluator(
llm=OpenAI(temperature=0, model="gpt-4"),
)
judges["faithfulness"] = FaithfulnessEvaluator(
llm=OpenAI(temperature=0, model="gpt-4"),
)
judges["semantic_similarity"] = | SemanticSimilarityEvaluator() | llama_index.core.evaluation.SemanticSimilarityEvaluator |
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
MONGO_URI = os.environ["MONGO_URI"]
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.storage.index_store.mongodb import MongoIndexStore
storage_context = StorageContext.from_defaults(
docstore=MongoDocumentStore.from_uri(uri=MONGO_URI),
index_store=MongoIndexStore.from_uri(uri=MONGO_URI),
)
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
nodes, storage_context=storage_context
)
len(storage_context.docstore.docs)
storage_context.persist()
list_id = summary_index.index_id
vector_id = vector_index.index_id
keyword_id = keyword_table_index.index_id
from llama_index.core import load_index_from_storage
storage_context = StorageContext.from_defaults(
docstore=MongoDocumentStore.from_uri(uri=MONGO_URI),
index_store=MongoIndexStore.from_uri(uri=MONGO_URI),
)
summary_index = load_index_from_storage(
storage_context=storage_context, index_id=list_id
)
vector_index = load_index_from_storage(
storage_context=storage_context, vector_id=vector_id
)
keyword_table_index = load_index_from_storage(
storage_context=storage_context, keyword_id=keyword_id
)
chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.llm = chatgpt
Settings.chunk_size = 1024
query_engine = summary_index.as_query_engine()
list_response = query_engine.query("What is a summary of this document?")
display_response(list_response)
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-flag-embedding-reranker')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install git+https://github.com/FlagOpen/FlagEmbedding.git')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
OPENAI_API_TOKEN = "sk-"
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = HuggingFaceEmbedding(
model_name="BAAI/bge-small-en-v1.5"
)
index = VectorStoreIndex.from_documents(documents=documents)
from llama_index.postprocessor.flag_embedding_reranker import (
FlagEmbeddingReranker,
)
rerank = FlagEmbeddingReranker(model="BAAI/bge-reranker-large", top_n=5)
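# Usage sketch (added for clarity, not from the original notebook): apply the
# reranker as a node postprocessor on top of a wider initial retrieval, so the
# top 10 candidates are re-scored and only the best 5 reach the LLM.
query_engine = index.as_query_engine(
    similarity_top_k=10,
    node_postprocessors=[rerank],
)
response = query_engine.query("What did the author do growing up?")
print(response)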
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().system('pip install llama-index')
import pinecone
import os
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="us-west4-gcp-free")
import os
import getpass
import openai
openai.api_key = "sk-<your-key>"
try:
pinecone.create_index(
"quickstart-index", dimension=1536, metric="euclidean", pod_type="p1"
)
except Exception:
pass
pinecone_index = pinecone.Index("quickstart-index")
pinecone_index.delete(deleteAll=True, namespace="test")
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text=(
"Michael Jordan is a retired professional basketball player,"
" widely regarded as one of the greatest basketball players of all"
" time."
),
metadata={
"category": "Sports",
"country": "United States",
"gender": "male",
"born": 1963,
},
),
TextNode(
text=(
"Angelina Jolie is an American actress, filmmaker, and"
" humanitarian. She has received numerous awards for her acting"
" and is known for her philanthropic work."
),
metadata={
"category": "Entertainment",
"country": "United States",
"gender": "female",
"born": 1975,
},
),
TextNode(
text=(
"Elon Musk is a business magnate, industrial designer, and"
" engineer. He is the founder, CEO, and lead designer of SpaceX,"
" Tesla, Inc., Neuralink, and The Boring Company."
),
metadata={
"category": "Business",
"country": "United States",
"gender": "male",
"born": 1971,
},
),
TextNode(
text=(
"Rihanna is a Barbadian singer, actress, and businesswoman. She"
" has achieved significant success in the music industry and is"
" known for her versatile musical style."
),
metadata={
"category": "Music",
"country": "Barbados",
"gender": "female",
"born": 1988,
},
),
TextNode(
text=(
"Cristiano Ronaldo is a Portuguese professional footballer who is"
" considered one of the greatest football players of all time. He"
" has won numerous awards and set multiple records during his"
" career."
),
metadata={
"category": "Sports",
"country": "Portugal",
"gender": "male",
"born": 1985,
},
),
]
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index, namespace="test"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
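# Next step, mirroring the pattern used elsewhere in this document (added
# sketch): index the nodes into the Pinecone-backed storage context.
index = VectorStoreIndex(nodes, storage_context=storage_context)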
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-rankgpt-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-packs-infer-retrieve-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import datasets
dataset = datasets.load_dataset("BioDEX/BioDEX-ICSR")
dataset
from llama_index.core import get_tokenizer
import re
from typing import Set, List
tokenizer = get_tokenizer()
sample_size = 5
def get_reactions_row(raw_target: str) -> List[str]:
"""Get reactions from a single row."""
reaction_pattern = re.compile(r"reactions:\s*(.*)")
reaction_match = reaction_pattern.search(raw_target)
if reaction_match:
reactions = reaction_match.group(1).split(",")
reactions = [r.strip().lower() for r in reactions]
else:
reactions = []
return reactions
def get_reactions_set(dataset) -> Set[str]:
"""Get set of all reactions."""
reactions = set()
for data in dataset["train"]:
reactions.update(set(get_reactions_row(data["target"])))
return reactions
def get_samples(dataset, sample_size: int = 5):
"""Get processed sample.
Contains source text and also the reaction label.
Parse reaction text to specifically extract reactions.
"""
samples = []
for idx, data in enumerate(dataset["train"]):
if idx >= sample_size:
break
text = data["fulltext_processed"]
raw_target = data["target"]
reactions = get_reactions_row(raw_target)
samples.append({"text": text, "reactions": reactions})
return samples
from llama_index.packs.infer_retrieve_rerank import InferRetrieveRerankPack
from llama_index.core.llama_pack import download_llama_pack
InferRetrieveRerankPack = download_llama_pack(
"InferRetrieveRerankPack",
"./irr_pack",
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-16k")
pred_context = """\
The output predictions should be a list of comma-separated adverse \
drug reactions. \
"""
reranker_top_n = 10
pack = InferRetrieveRerankPack(
get_reactions_set(dataset),
llm=llm,
pred_context=pred_context,
reranker_top_n=reranker_top_n,
verbose=True,
)
samples = get_samples(dataset, sample_size=5)
pred_reactions = pack.run(inputs=[s["text"] for s in samples])
gt_reactions = [s["reactions"] for s in samples]
pred_reactions[2]
gt_reactions[2]
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.llms import LLM
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.postprocessor.rankgpt_rerank import RankGPTRerank
from llama_index.core.output_parsers import ChainableOutputParser
from typing import List
import random
all_reactions = get_reactions_set(dataset)
random.sample(list(all_reactions), 5)  # random.sample requires a sequence, not a set
from llama_index.core.schema import TextNode
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core import VectorStoreIndex
reaction_nodes = [TextNode(text=r) for r in all_reactions]
pipeline = IngestionPipeline(transformations=[OpenAIEmbedding()])
reaction_nodes = await pipeline.arun(documents=reaction_nodes)
index = VectorStoreIndex(reaction_nodes)
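# Illustrative follow-up (not from the original cells): turn the reaction-label
# index into a retriever so inferred labels can be matched against it. The
# example query string is a placeholder.
reaction_retriever = index.as_retriever(similarity_top_k=2)
retrieved = reaction_retriever.retrieve("nausea and vomiting after treatment")
print([n.get_content() for n in retrieved])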
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
import nest_asyncio
nest_asyncio.apply()
get_ipython().system("mkdir -p 'data/'")
get_ipython().system("curl 'https://arxiv.org/pdf/2307.09288.pdf' -o 'data/llama2.pdf'")
from llama_index.readers.file import UnstructuredReader
documents = UnstructuredReader().load_data("data/llama2.pdf")
from llama_index.core.llama_pack import download_llama_pack
DenseXRetrievalPack = download_llama_pack("DenseXRetrievalPack", "./dense_pack")
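# Hedged usage sketch: pack constructor arguments vary by version, but the
# DenseX pack is typically built over the loaded documents and queried via run().
dense_pack = DenseXRetrievalPack(documents)
response = dense_pack.run("What are the key safety measures used in Llama 2?")
print(response)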
get_ipython().run_line_magic('pip', 'install llama-index-readers-database')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from __future__ import absolute_import
import os
os.environ["OPENAI_API_KEY"] = ""
from llama_index.readers.database import DatabaseReader
from llama_index.core import VectorStoreIndex
db = DatabaseReader(
scheme="postgresql", # Database Scheme
host="localhost", # Database Host
port="5432", # Database Port
user="postgres", # Database User
password="FakeExamplePassword", # Database Password
dbname="postgres", # Database Name
)
print(type(db))
print(type(db.load_data))
print(type(db.sql_database))
print(type(db.sql_database.from_uri))
print(type(db.sql_database.get_single_table_info))
print(type(db.sql_database.get_table_columns))
print(type(db.sql_database.get_usable_table_names))
print(type(db.sql_database.insert_into_table))
print(type(db.sql_database.run_sql))
print(type(db.sql_database.dialect))
print(type(db.sql_database.engine))
print(type(db.sql_database))
db_from_sql_database = DatabaseReader(sql_database=db.sql_database)
print(type(db_from_sql_database))
print(type(db.sql_database.engine))
db_from_engine = DatabaseReader(engine=db.sql_database.engine)
print(type(db_from_engine))
print(type(db.uri))
db_from_uri = DatabaseReader(uri=db.uri)
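# Illustrative sketch (not part of the original cells): every DatabaseReader
# exposes load_data(query=...), which turns each returned row into a Document.
# The SQL text and table name below are placeholders.
documents = db.load_data(query="SELECT * FROM some_table LIMIT 100")
index = VectorStoreIndex.from_documents(documents)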
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-colbert')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install torch sentence-transformers')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.indices.managed.google import GoogleIndex
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
project_name = "TODO-your-project-name" # @param {type:"string"}
email = "[email protected]" # @param {type:"string"}
client_file_name = "client_secret.json"
get_ipython().system('gcloud config set project $project_name')
get_ipython().system('gcloud config set account $email')
get_ipython().system('gcloud auth application-default login --no-browser --client-id-file=$client_file_name --scopes="https://www.googleapis.com/auth/generative-language.retriever,https://www.googleapis.com/auth/cloud-platform"')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
GOOGLE_API_KEY = "" # add your GOOGLE API key here
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from llama_index.core import SimpleDirectoryReader
from llama_index.indices.managed.google import GoogleIndex
google_index = GoogleIndex.create_corpus(display_name="My first corpus!")
print(f"Newly created corpus ID is {google_index.corpus_id}.")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
google_index.insert_documents(documents)
google_index = GoogleIndex.from_corpus(corpus_id="")
query_engine = google_index.as_query_engine()
response = query_engine.query("which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
)
query_engine = google_index.as_query_engine(
temperature=0.3,
answer_style=GenerateAnswerRequest.AnswerStyle.VERBOSE,
)
response = query_engine.query("Which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
)
query_engine = google_index.as_query_engine(
temperature=0.3,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
response = query_engine.query("Which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
)
query_engine = google_index.as_query_engine(
temperature=0.3,
answer_style=GenerateAnswerRequest.AnswerStyle.EXTRACTIVE,
)
response = query_engine.query("Which program did this author attend?")
print(response)
from llama_index.core.response.notebook_utils import display_source_node
for r in response.source_nodes:
display_source_node(r, source_length=1000)
from llama_index.response_synthesizers.google import GoogleTextSynthesizer
from llama_index.vector_stores.google import GoogleVectorStore
from llama_index.core import VectorStoreIndex
from llama_index.llms.gemini import Gemini
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.embeddings.gemini import GeminiEmbedding
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.7, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
)
reranker = LLMRerank(
top_n=5,
    llm=Gemini(api_key=GOOGLE_API_KEY),
)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=')
get_ipython().run_line_magic('env', 'BRAINTRUST_API_KEY=')
get_ipython().run_line_magic('env', 'TOKENIZERS_PARALLELISM=true # This is needed to avoid a warning message from Chroma')
get_ipython().run_line_magic('pip', 'install -U llama_hub llama_index braintrust autoevals pypdf pillow transformers torch torchvision')
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-small-en")
llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
base_retriever = base_index.as_retriever(similarity_top_k=2)
retrievals = base_retriever.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for n in retrievals:
display_source_node(n, source_length=1500)
query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm)
response = query_engine_base.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
sub_chunk_sizes = [128, 256, 512]
sub_node_parsers = [SentenceSplitter(chunk_size=c) for c in sub_chunk_sizes]
import os
os.environ["OPENAI_API_KEY"] = "YOUR OPENAI API KEY"
from llama_index.llms.openai import OpenAI
llm = OpenAI("gpt-4")
from llama_index.core.llama_pack import download_llama_pack
SelfDiscoverPack = download_llama_pack("SelfDiscoverPack", "./self_discover_pack")
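# Hedged usage sketch: exact constructor arguments can differ by pack version,
# but the pack is generally built with an LLM and executed with run(task).
# The reasoning task below is an invented placeholder.
self_discover_pack = SelfDiscoverPack(verbose=True, llm=llm)
task = (
    "Michael has 15 oranges. He gives 4 to his brother and trades 3 for apples. "
    "How many oranges does he have left?"
)
output = self_discover_pack.run(task)
print(output)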
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
import nest_asyncio
nest_asyncio.apply()
import cProfile, pstats
from pstats import SortKey
get_ipython().system('llamaindex-cli download-llamadataset PatronusAIFinanceBenchDataset --download-dir ./data')
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader(input_dir="./data/source_files").load_data()
from llama_index.core import Document
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.extractors import TitleExtractor
from llama_index.core.ingestion import IngestionPipeline
pipeline = IngestionPipeline(
transformations=[
SentenceSplitter(chunk_size=1024, chunk_overlap=20),
        TitleExtractor(),
    ]
)
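# Added usage sketch: run the ingestion pipeline over the loaded documents to
# produce the transformed (split + title-annotated) nodes.
finance_nodes = pipeline.run(documents=documents, show_progress=True)
print(len(finance_nodes))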
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic')
import nest_asyncio
nest_asyncio.apply()
from llama_index.core import SimpleDirectoryReader, Document
from llama_index.core import SummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.llms.anthropic import Anthropic
from llama_index.core.evaluation import CorrectnessEvaluator
get_ipython().system("mkdir -p 'data/10k/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'")
uber_docs0 = SimpleDirectoryReader(
input_files=["./data/10k/uber_2021.pdf"]
).load_data()
uber_doc = Document(text="\n\n".join([d.get_content() for d in uber_docs0]))
from llama_index.core.utils import globals_helper
num_tokens = len(globals_helper.tokenizer(uber_doc.get_content()))
print(f"NUM TOKENS: {num_tokens}")
context_str = "Jerry's favorite snack is Hot Cheetos."
query_str = "What is Jerry's favorite snack?"
def augment_doc(doc_str, context, position):
"""Augment doc with additional context at a given position."""
doc_str1 = doc_str[:position]
doc_str2 = doc_str[position:]
return f"{doc_str1}...\n\n{context}\n\n...{doc_str2}"
test_str = augment_doc(
uber_doc.get_content(), context_str, int(0.5 * len(uber_doc.get_content()))
)
async def run_experiments(
doc, position_percentiles, context_str, query, llm, response_mode="compact"
):
eval_llm = OpenAI(model="gpt-4-1106-preview")
correctness_evaluator = CorrectnessEvaluator(llm=eval_llm)
eval_scores = {}
for idx, position_percentile in enumerate(position_percentiles):
print(f"Position percentile: {position_percentile}")
position_idx = int(position_percentile * len(uber_doc.get_content()))
new_doc_str = augment_doc(
uber_doc.get_content(), context_str, position_idx
)
new_doc = Document(text=new_doc_str)
index = SummaryIndex.from_documents(
[new_doc],
)
query_engine = index.as_query_engine(
response_mode=response_mode, llm=llm
)
print(f"Query: {query}")
response = query_engine.query(query)
print(f"Response: {str(response)}")
eval_result = correctness_evaluator.evaluate(
query=query, response=str(response), reference=context_str
)
eval_score = eval_result.score
print(f"Eval score: {eval_score}")
eval_scores[position_percentile] = eval_score
return eval_scores
position_percentiles = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
llm = OpenAI(model="gpt-4-1106-preview")
eval_scores_gpt4 = await run_experiments(
[uber_doc],
position_percentiles,
context_str,
query_str,
llm,
response_mode="compact",
)
llm = OpenAI(model="gpt-4-1106-preview")
eval_scores_gpt4_ts = await run_experiments(
[uber_doc],
position_percentiles,
context_str,
query_str,
llm,
response_mode="tree_summarize",
)
llm = Anthropic(model="claude-2")
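# Added for symmetry with the GPT-4 runs above (not in the original cells):
# repeat the position-sensitivity experiment with the Anthropic model.
eval_scores_anthropic = await run_experiments(
    [uber_doc],
    position_percentiles,
    context_str,
    query_str,
    llm,
    response_mode="compact",
)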
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=YOUR_OPENAI_KEY')
get_ipython().system('pip install llama-index pypdf')
get_ipython().system("mkdir -p 'data/'")
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-small-en")
llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
base_retriever = base_index.as_retriever(similarity_top_k=2)
retrievals = base_retriever.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for n in retrievals:
display_source_node(n, source_length=1500)
query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm)
response = query_engine_base.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
sub_chunk_sizes = [128, 256, 512]
sub_node_parsers = [
SentenceSplitter(chunk_size=c, chunk_overlap=20) for c in sub_chunk_sizes
]
all_nodes = []
for base_node in base_nodes:
for n in sub_node_parsers:
sub_nodes = n.get_nodes_from_documents([base_node])
sub_inodes = [
IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes
]
all_nodes.extend(sub_inodes)
original_node = IndexNode.from_text_node(base_node, base_node.node_id)
all_nodes.append(original_node)
all_nodes_dict = {n.node_id: n for n in all_nodes}
vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model)
vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2)
retriever_chunk = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_chunk},
node_dict=all_nodes_dict,
verbose=True,
)
nodes = retriever_chunk.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for node in nodes:
display_source_node(node, source_length=2000)
query_engine_chunk = RetrieverQueryEngine.from_args(retriever_chunk, llm=llm)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader(
input_files=["data/paul_graham/paul_graham_essay.txt"]
).load_data()
from llama_index.core.llama_dataset.generator import RagDatasetGenerator
from llama_index.llms.openai import OpenAI
llm_gpt35 = OpenAI(model="gpt-4", temperature=0.3)
dataset_generator = RagDatasetGenerator.from_documents(
documents,
llm=llm_gpt35,
    num_questions_per_chunk=2,  # set the number of questions per chunk
show_progress=True,
)
rag_dataset = dataset_generator.generate_dataset_from_nodes()
rag_dataset.save_json("rag_dataset.json")
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents=documents)
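# Next steps, mirroring the prediction pattern used earlier in this document
# (added sketch): build a query engine and generate predictions over the
# freshly generated RAG dataset.
query_engine = index.as_query_engine()
prediction_dataset = await rag_dataset.amake_predictions_with(
    query_engine=query_engine, show_progress=True
)
prediction_dataset.to_pandas()[:5]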
get_ipython().system('pip install llama-index')
get_ipython().system('pip install duckdb')
get_ipython().system('pip install llama-index-vector-stores-duckdb')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.duckdb import DuckDBVectorStore
from llama_index.core import StorageContext
from IPython.display import Markdown, display
import os
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("data/paul_graham/").load_data()
vector_store = DuckDBVectorStore()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
documents = SimpleDirectoryReader("data/paul_graham/").load_data()
vector_store = DuckDBVectorStore("pg.duckdb", persist_dir="./persist/")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
vector_store = DuckDBVectorStore.from_local("./persist/pg.duckdb")
index = VectorStoreIndex.from_vector_store(vector_store)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
from llama_index.core.schema import TextNode
nodes = [
TextNode(
**{
"text": "The Shawshank Redemption",
"metadata": {
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
"ref_doc_id": "doc_1",
},
}
),
TextNode(
**{
"text": "The Godfather",
"metadata": {
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
"ref_doc_id": "doc_1",
},
}
),
TextNode(
**{
"text": "Inception",
"metadata": {
"director": "Christopher Nolan",
"theme": "Sci-fi",
"year": 2010,
"ref_doc_id": "doc_2",
},
}
),
]
vector_store = DuckDBVectorStore()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
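# Illustrative metadata-filter query over the DuckDB-backed index (added sketch;
# the filter classes live in llama_index.core.vector_stores in current releases).
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters

filters = MetadataFilters(filters=[ExactMatchFilter(key="theme", value="Mafia")])
retriever = index.as_retriever(filters=filters)
print(retriever.retrieve("Tell me about a mafia-themed film"))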
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///:memory:", future=True)
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
metadata_obj.tables.keys()
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{"city_name": "Berlin", "population": 3645000, "country": "Germany"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
with engine.connect() as connection:
cursor = connection.exec_driver_sql("SELECT * FROM city_stats")
print(cursor.fetchall())
get_ipython().system('pip install wikipedia')
from llama_index.readers.wikipedia import WikipediaReader
cities = ["Toronto", "Berlin", "Tokyo"]
wiki_docs = WikipediaReader().load_data(pages=cities)
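# Added sketch: wrap the SQLAlchemy engine in a SQLDatabase so the structured
# city_stats table can later be queried alongside the Wikipedia documents.
from llama_index.core import SQLDatabase

sql_database = SQLDatabase(engine, include_tables=["city_stats"])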
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core import (
VectorStoreIndex,
SimpleKeywordTableIndex,
SimpleDirectoryReader,
)
from llama_index.core import SummaryIndex
from llama_index.core.schema import IndexNode
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.callbacks import CallbackManager
from llama_index.llms.openai import OpenAI
wiki_titles = [
"Toronto",
"Seattle",
"Chicago",
"Boston",
"Houston",
]
from pathlib import Path
import requests
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
data_path = Path("data")
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
city_docs = {}
for wiki_title in wiki_titles:
city_docs[wiki_title] = SimpleDirectoryReader(
input_files=[f"data/{wiki_title}.txt"]
).load_data()
llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
callback_manager = CallbackManager([])
from llama_index.agent.openai import OpenAIAgent
from llama_index.core import load_index_from_storage, StorageContext
from llama_index.core.node_parser import SentenceSplitter
import os
node_parser = SentenceSplitter()
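# Added sketch: split each city's documents into nodes with the parser above,
# the usual first step before building one index (and agent) per city.
city_nodes = {
    wiki_title: node_parser.get_nodes_from_documents(city_docs[wiki_title])
    for wiki_title in wiki_titles
}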
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface')
import nest_asyncio
nest_asyncio.apply()
import os
HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
import pandas as pd
def display_eval_df(question, source, answer_a, answer_b, result) -> None:
"""Pretty print question/answer + gpt-4 judgement dataset."""
eval_df = pd.DataFrame(
{
"Question": question,
"Source": source,
"Model A": answer_a["model"],
"Answer A": answer_a["text"],
"Model B": answer_b["model"],
"Answer B": answer_b["text"],
"Score": result.score,
"Judgement": result.feedback,
},
index=[0],
)
eval_df = eval_df.style.set_properties(
**{
"inline-size": "300px",
"overflow-wrap": "break-word",
},
subset=["Answer A", "Answer B"]
)
display(eval_df)
get_ipython().system('pip install wikipedia -q')
from llama_index.readers.wikipedia import WikipediaReader
train_cities = [
"San Francisco",
"Toronto",
"New York",
"Vancouver",
"Montreal",
"Boston",
]
test_cities = [
"Tokyo",
"Singapore",
"Paris",
]
train_documents = WikipediaReader().load_data(
pages=[f"History of {x}" for x in train_cities]
)
test_documents = WikipediaReader().load_data(
pages=[f"History of {x}" for x in test_cities]
)
QUESTION_GEN_PROMPT = (
"You are a Teacher/ Professor. Your task is to setup "
"a quiz/examination. Using the provided context, formulate "
"a single question that captures an important fact from the "
"context. Restrict the question to the context information provided."
)
from llama_index.core.evaluation import DatasetGenerator
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
train_dataset_generator = DatasetGenerator.from_documents(
train_documents,
question_gen_query=QUESTION_GEN_PROMPT,
llm=llm,
show_progress=True,
num_questions_per_chunk=25,
)
test_dataset_generator = DatasetGenerator.from_documents(
test_documents,
question_gen_query=QUESTION_GEN_PROMPT,
llm=llm,
show_progress=True,
num_questions_per_chunk=25,
)
train_questions = train_dataset_generator.generate_questions_from_nodes(
num=200
)
test_questions = test_dataset_generator.generate_questions_from_nodes(num=150)
len(train_questions), len(test_questions)
train_questions[:3]
test_questions[:3]
from llama_index.core import VectorStoreIndex
from llama_index.core.retrievers import VectorIndexRetriever
train_index = VectorStoreIndex.from_documents(documents=train_documents)
train_retriever = VectorIndexRetriever(
index=train_index,
similarity_top_k=2,
)
test_index = VectorStoreIndex.from_documents(documents=test_documents)
test_retriever = VectorIndexRetriever(
index=test_index,
similarity_top_k=2,
)
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
def create_query_engine(
hf_name: str, retriever: VectorIndexRetriever, hf_llm_generators: dict
) -> RetrieverQueryEngine:
"""Create a RetrieverQueryEngine using the HuggingFaceInferenceAPI LLM"""
if hf_name not in hf_llm_generators:
raise KeyError("model not listed in hf_llm_generators")
llm = HuggingFaceInferenceAPI(
model_name=hf_llm_generators[hf_name],
context_window=2048, # to use refine
token=HUGGING_FACE_TOKEN,
)
return RetrieverQueryEngine.from_args(retriever=retriever, llm=llm)
hf_llm_generators = {
"mistral-7b-instruct": "mistralai/Mistral-7B-Instruct-v0.1",
"llama2-7b-chat": "meta-llama/Llama-2-7b-chat-hf",
}
train_query_engines = {
mdl: create_query_engine(mdl, train_retriever, hf_llm_generators)
for mdl in hf_llm_generators.keys()
}
test_query_engines = {
mdl: create_query_engine(mdl, test_retriever, hf_llm_generators)
for mdl in hf_llm_generators.keys()
}
import tqdm
import random
train_dataset = []
for q in tqdm.tqdm(train_questions):
model_versus = random.sample(list(train_query_engines.items()), 2)
data_entry = {"question": q}
responses = []
source = None
for name, engine in model_versus:
response = engine.query(q)
response_struct = {}
response_struct["model"] = name
response_struct["text"] = str(response)
if source is not None:
assert source == response.source_nodes[0].node.text[:1000] + "..."
else:
source = response.source_nodes[0].node.text[:1000] + "..."
responses.append(response_struct)
data_entry["answers"] = responses
data_entry["source"] = source
train_dataset.append(data_entry)
from llama_index.llms.openai import OpenAI
from llama_index.finetuning.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from llama_index.core.evaluation import PairwiseComparisonEvaluator
from llama_index.core import Settings
main_finetuning_handler = OpenAIFineTuningHandler()
callback_manager = CallbackManager([main_finetuning_handler])
Settings.callback_manager = callback_manager
llm_4 = OpenAI(temperature=0, model="gpt-4", callback_manager=callback_manager)
gpt4_judge = PairwiseComparisonEvaluator(llm=llm_4)
for data_entry in tqdm.tqdm(train_dataset):
final_eval_result = await gpt4_judge.aevaluate(
query=data_entry["question"],
response=data_entry["answers"][0]["text"],
second_response=data_entry["answers"][1]["text"],
reference=data_entry["source"],
)
judgement = {}
judgement["llm"] = "gpt_4"
judgement["score"] = final_eval_result.score
judgement["text"] = final_eval_result.response
judgement["source"] = final_eval_result.pairwise_source
data_entry["evaluations"] = [judgement]
display_eval_df(
question=data_entry["question"],
source=data_entry["source"],
answer_a=data_entry["answers"][0],
answer_b=data_entry["answers"][1],
result=final_eval_result,
)
main_finetuning_handler.save_finetuning_events(
"pairwise_finetuning_events.jsonl"
)
import json
with open("pairwise_finetuning_events.jsonl") as f:
combined_finetuning_events = [json.loads(line) for line in f]
finetuning_events = (
[]
) # for storing events using original order of presentation
flipped_finetuning_events = (
[]
) # for storing events using flipped order of presentation
for ix, event in enumerate(combined_finetuning_events):
if ix % 2 == 0: # we always do original ordering first
finetuning_events += [event]
else: # then we flip order and have GPT-4 make another judgement
flipped_finetuning_events += [event]
assert len(finetuning_events) == len(flipped_finetuning_events)
resolved_finetuning_events = []
for ix, data_entry in enumerate(train_dataset):
if data_entry["evaluations"][0]["source"] == "original":
resolved_finetuning_events += [finetuning_events[ix]]
elif data_entry["evaluations"][0]["source"] == "flipped":
resolved_finetuning_events += [flipped_finetuning_events[ix]]
else:
continue
with open("resolved_pairwise_finetuning_events.jsonl", "w") as outfile:
for entry in resolved_finetuning_events:
print(json.dumps(entry), file=outfile)
from llama_index.finetuning import OpenAIFinetuneEngine
finetune_engine = OpenAIFinetuneEngine(
"gpt-3.5-turbo",
"resolved_pairwise_finetuning_events.jsonl",
)
finetune_engine.finetune()
finetune_engine.get_current_job()
import random
test_dataset = []
for q in tqdm.tqdm(test_questions):
model_versus = random.sample(list(test_query_engines.items()), 2)
data_entry = {"question": q}
responses = []
source = None
for name, engine in model_versus:
response = engine.query(q)
response_struct = {}
response_struct["model"] = name
response_struct["text"] = str(response)
if source is not None:
assert source == response.source_nodes[0].node.text[:1000] + "..."
else:
source = response.source_nodes[0].node.text[:1000] + "..."
responses.append(response_struct)
data_entry["answers"] = responses
data_entry["source"] = source
test_dataset.append(data_entry)
for data_entry in tqdm.tqdm(test_dataset):
final_eval_result = await gpt4_judge.aevaluate(
query=data_entry["question"],
response=data_entry["answers"][0]["text"],
second_response=data_entry["answers"][1]["text"],
reference=data_entry["source"],
)
judgement = {}
judgement["llm"] = "gpt_4"
judgement["score"] = final_eval_result.score
judgement["text"] = final_eval_result.response
judgement["source"] = final_eval_result.pairwise_source
data_entry["evaluations"] = [judgement]
from llama_index.core.evaluation import EvaluationResult
ft_llm = finetune_engine.get_finetuned_model()
ft_gpt_3p5_judge = PairwiseComparisonEvaluator(llm=ft_llm)
for data_entry in tqdm.tqdm(test_dataset):
try:
final_eval_result = await ft_gpt_3p5_judge.aevaluate(
query=data_entry["question"],
response=data_entry["answers"][0]["text"],
second_response=data_entry["answers"][1]["text"],
reference=data_entry["source"],
)
    except Exception:
final_eval_result = EvaluationResult(
query=data_entry["question"],
response="",
passing=None,
score=0.5,
feedback="",
pairwise_source="output-cannot-be-parsed",
)
judgement = {}
judgement["llm"] = "ft_gpt_3p5"
judgement["score"] = final_eval_result.score
judgement["text"] = final_eval_result.response
judgement["source"] = final_eval_result.pairwise_source
data_entry["evaluations"] += [judgement]
gpt_3p5_llm = OpenAI(model="gpt-3.5-turbo")
gpt_3p5_judge = PairwiseComparisonEvaluator(llm=gpt_3p5_llm)
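# Added for completeness, mirroring the fine-tuned judge loop above: collect the
# un-fine-tuned GPT-3.5 judgements on the same test set for comparison.
for data_entry in tqdm.tqdm(test_dataset):
    final_eval_result = await gpt_3p5_judge.aevaluate(
        query=data_entry["question"],
        response=data_entry["answers"][0]["text"],
        second_response=data_entry["answers"][1]["text"],
        reference=data_entry["source"],
    )
    judgement = {}
    judgement["llm"] = "gpt_3p5"
    judgement["score"] = final_eval_result.score
    judgement["text"] = final_eval_result.response
    judgement["source"] = final_eval_result.pairwise_source
    data_entry["evaluations"] += [judgement]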
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
TABLE_NAME = os.environ["DYNAMODB_TABLE_NAME"]
from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore
from llama_index.storage.index_store.dynamodb import DynamoDBIndexStore
from llama_index.vector_stores.dynamodb import DynamoDBVectorStore
storage_context = StorageContext.from_defaults(
docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME),
index_store=DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME),
vector_store=DynamoDBVectorStore.from_table_name(table_name=TABLE_NAME),
)
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
nodes, storage_context=storage_context
)
len(storage_context.docstore.docs)
storage_context.persist()
list_id = summary_index.index_id
vector_id = vector_index.index_id
keyword_id = keyword_table_index.index_id
from llama_index.core import load_index_from_storage
storage_context = StorageContext.from_defaults(
docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME),
index_store=DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME),
vector_store=DynamoDBVectorStore.from_table_name(table_name=TABLE_NAME),
)
summary_index = load_index_from_storage(
storage_context=storage_context, index_id=list_id
)
keyword_table_index = load_index_from_storage(
storage_context=storage_context, index_id=keyword_id
)
vector_index = load_index_from_storage(
storage_context=storage_context, index_id=vector_id
)
chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.llm = chatgpt
Settings.chunk_size = 1024
query_engine = summary_index.as_query_engine()
list_response = query_engine.query("What is a summary of this document?")
display_response(list_response)
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().system('pip install llama-index')
from llama_index.core.node_parser import SimpleFileNodeParser
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
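# Usage sketch (the markdown path below is a placeholder, not from the original):
# FlatReader loads a file verbatim, and SimpleFileNodeParser picks an appropriate
# node parser based on the file type.
md_docs = reader.load_data(Path("./README.md"))
parser = SimpleFileNodeParser()
md_nodes = parser.get_nodes_from_documents(md_docs)
print(md_nodes[0].get_content()[:200])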
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-tools-metaphor')
get_ipython().system('wget "https://images.openai.com/blob/a2e49de2-ba5b-4869-9c2d-db3b4b5dcc19/new-models-and-developer-products-announced-at-devday.jpg?width=2000" -O other_images/openai/dev_day.png')
get_ipython().system('wget "https://drive.google.com/uc\\?id\\=1B4f5ZSIKN0zTTPPRlZ915Ceb3_uF9Zlq\\&export\\=download" -O other_images/adidas.png')
from llama_index.readers.web import SimpleWebPageReader
url = "https://openai.com/blog/new-models-and-developer-products-announced-at-devday"
reader = SimpleWebPageReader(html_to_text=True)
documents = reader.load_data(urls=[url])
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core import Settings
Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
vector_index = VectorStoreIndex.from_documents(
documents,
)
query_tool = QueryEngineTool(
query_engine=vector_index.as_query_engine(),
metadata=ToolMetadata(
name=f"vector_tool",
description=(
"Useful to lookup new features announced by OpenAI"
),
),
)
from llama_index.core.agent.react_multimodal.step import (
MultimodalReActAgentWorker,
)
from llama_index.core.agent import AgentRunner
from llama_index.core.multi_modal_llms import MultiModalLLM
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core.agent import Task
mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview", max_new_tokens=1000)
react_step_engine = MultimodalReActAgentWorker.from_tools(
[query_tool],
multi_modal_llm=mm_llm,
verbose=True,
)
agent = AgentRunner(react_step_engine)
query_str = (
"The photo shows some new features released by OpenAI. "
"Can you pinpoint the features in the photo and give more details using relevant tools?"
)
from llama_index.core.schema import ImageDocument
image_document = ImageDocument(image_path="other_images/openai/dev_day.png")
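# Added sketch of the multimodal ReAct flow: pass the image alongside the query
# when creating the agent task, then step through until the agent finishes.
# The extra_state key and step-wise calls follow the AgentRunner interface and
# may differ slightly by version.
task = agent.create_task(
    query_str,
    extra_state={"image_docs": [image_document]},
)
step_output = agent.run_step(task.task_id)
while not step_output.is_last:
    step_output = agent.run_step(task.task_id)
response = agent.finalize_response(task.task_id)
print(str(response))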
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-elasticsearch')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.elasticsearch import ElasticsearchStore
from llama_index.core import StorageContext
from IPython.display import Markdown, display
import os
import getpass
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import Settings
Settings.embed_model = HuggingFaceEmbedding(
model_name="BAAI/bge-small-en-v1.5"
)
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = ElasticsearchStore(
index_name="paul_graham_essay", es_url="http://localhost:9200"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
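# Added sketch, mirroring the pattern used throughout this document: build the
# index on top of the Elasticsearch-backed storage context and query it.
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))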
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-lancedb')
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-lancedb')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-clip')
get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm')
get_ipython().run_line_magic('pip', 'install -U openai-whisper')
get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git')
get_ipython().run_line_magic('pip', 'install torch torchvision')
get_ipython().run_line_magic('pip', 'install matplotlib scikit-image')
get_ipython().run_line_magic('pip', 'install lancedb')
get_ipython().run_line_magic('pip', 'install moviepy')
get_ipython().run_line_magic('pip', 'install pytube')
get_ipython().run_line_magic('pip', 'install pydub')
get_ipython().run_line_magic('pip', 'install SpeechRecognition')
get_ipython().run_line_magic('pip', 'install ffmpeg-python')
get_ipython().run_line_magic('pip', 'install soundfile')
from moviepy.editor import VideoFileClip
from pathlib import Path
import speech_recognition as sr
from pytube import YouTube
from pprint import pprint
import os
OPENAI_API_TOKEN = ""
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
video_url = "https://www.youtube.com/watch?v=d_qvLDhkg00"
output_video_path = "./video_data/"
output_folder = "./mixed_data/"
output_audio_path = "./mixed_data/output_audio.wav"
filepath = output_video_path + "input_vid.mp4"
Path(output_folder).mkdir(parents=True, exist_ok=True)
from PIL import Image
import matplotlib.pyplot as plt
import os
def plot_images(image_paths):
images_shown = 0
plt.figure(figsize=(16, 9))
for img_path in image_paths:
if os.path.isfile(img_path):
image = Image.open(img_path)
plt.subplot(2, 3, images_shown + 1)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
images_shown += 1
            if images_shown >= 6:  # a 2x3 subplot grid holds at most six frames
break
def download_video(url, output_path):
"""
Download a video from a given url and save it to the output path.
Parameters:
url (str): The url of the video to download.
output_path (str): The path to save the video to.
Returns:
dict: A dictionary containing the metadata of the video.
"""
yt = YouTube(url)
metadata = {"Author": yt.author, "Title": yt.title, "Views": yt.views}
yt.streams.get_highest_resolution().download(
output_path=output_path, filename="input_vid.mp4"
)
return metadata
def video_to_images(video_path, output_folder):
"""
Convert a video to a sequence of images and save them to the output folder.
Parameters:
video_path (str): The path to the video file.
output_folder (str): The path to the folder to save the images to.
"""
clip = VideoFileClip(video_path)
clip.write_images_sequence(
os.path.join(output_folder, "frame%04d.png"), fps=0.2
)
def video_to_audio(video_path, output_audio_path):
"""
Convert a video to audio and save it to the output path.
Parameters:
video_path (str): The path to the video file.
output_audio_path (str): The path to save the audio to.
"""
clip = VideoFileClip(video_path)
audio = clip.audio
audio.write_audiofile(output_audio_path)
def audio_to_text(audio_path):
"""
Convert audio to text using the SpeechRecognition library.
Parameters:
audio_path (str): The path to the audio file.
Returns:
    text (str): The text recognized from the audio.
    """
    recognizer = sr.Recognizer()
    audio = sr.AudioFile(audio_path)
    text = ""
    with audio as source:
        audio_data = recognizer.record(source)
    try:
        text = recognizer.recognize_whisper(audio_data)
    except sr.UnknownValueError:
        print("Speech recognition could not understand the audio.")
    except sr.RequestError as e:
        print(f"Could not request results from service; {e}")
    return text
try:
metadata_vid = download_video(video_url, output_video_path)
video_to_images(filepath, output_folder)
video_to_audio(filepath, output_audio_path)
text_data = audio_to_text(output_audio_path)
    with open(output_folder + "output_text.txt", "w") as file:
        file.write(text_data)
    print("Text data saved to file")
os.remove(output_audio_path)
print("Audio file removed")
except Exception as e:
raise e
from llama_index.core.indices import MultiModalVectorStoreIndex
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.vector_stores.lancedb import LanceDBVectorStore
text_store = LanceDBVectorStore(uri="lancedb", table_name="text_collection")
image_store = | LanceDBVectorStore(uri="lancedb", table_name="image_collection") | llama_index.vector_stores.lancedb.LanceDBVectorStore |
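# Hedged sketch (not part of the original row): restate the truncated image_store
# assignment, then build a multi-modal index over the extracted frames and
# transcript. The retriever top-k values are illustrative assumptions.
image_store = LanceDBVectorStore(uri="lancedb", table_name="image_collection")
storage_context = StorageContext.from_defaults(
    vector_store=text_store, image_store=image_store
)
documents = SimpleDirectoryReader(output_folder).load_data()
index = MultiModalVectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
retriever_engine = index.as_retriever(
    similarity_top_k=5, image_similarity_top_k=5
)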
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=YOUR_OPENAI_KEY')
get_ipython().system('pip install llama-index pypdf')
get_ipython().system("mkdir -p 'data/'")
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-small-en")
llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
base_retriever = base_index.as_retriever(similarity_top_k=2)
retrievals = base_retriever.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for n in retrievals:
display_source_node(n, source_length=1500)
query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm)
response = query_engine_base.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
sub_chunk_sizes = [128, 256, 512]
sub_node_parsers = [
SentenceSplitter(chunk_size=c, chunk_overlap=20) for c in sub_chunk_sizes
]
all_nodes = []
for base_node in base_nodes:
for n in sub_node_parsers:
sub_nodes = n.get_nodes_from_documents([base_node])
sub_inodes = [
IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes
]
all_nodes.extend(sub_inodes)
original_node = IndexNode.from_text_node(base_node, base_node.node_id)
all_nodes.append(original_node)
all_nodes_dict = {n.node_id: n for n in all_nodes}
vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model)
vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2)
retriever_chunk = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever_chunk},
node_dict=all_nodes_dict,
verbose=True,
)
nodes = retriever_chunk.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for node in nodes:
display_source_node(node, source_length=2000)
query_engine_chunk = RetrieverQueryEngine.from_args(retriever_chunk, llm=llm)
response = query_engine_chunk.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
from llama_index.core.extractors import (
SummaryExtractor,
QuestionsAnsweredExtractor,
)
extractors = [
| SummaryExtractor(summaries=["self"], show_progress=True) | llama_index.core.extractors.SummaryExtractor |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'pg_essay.txt'")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader(input_files=["pg_essay.txt"])
documents = reader.load_data()
from llama_index.core.query_pipeline import (
QueryPipeline,
InputComponent,
ArgPackComponent,
)
from typing import Dict, Any, List, Optional
from llama_index.core.llama_pack import BaseLlamaPack
from llama_index.core.llms import LLM
from llama_index.llms.openai import OpenAI
from llama_index.core import Document, VectorStoreIndex
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.schema import NodeWithScore, TextNode
from llama_index.core.node_parser import SentenceSplitter
llm = OpenAI(model="gpt-3.5-turbo")
chunk_sizes = [128, 256, 512, 1024]
query_engines = {}
for chunk_size in chunk_sizes:
splitter = SentenceSplitter(chunk_size=chunk_size, chunk_overlap=0)
nodes = splitter.get_nodes_from_documents(documents)
vector_index = VectorStoreIndex(nodes)
query_engines[str(chunk_size)] = vector_index.as_query_engine(llm=llm)
p = QueryPipeline(verbose=True)
module_dict = {
**query_engines,
"input": InputComponent(),
"summarizer": | TreeSummarize() | llama_index.core.response_synthesizers.TreeSummarize |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core.chat_engine import SimpleChatEngine
chat_engine = SimpleChatEngine.from_defaults()
chat_engine.chat_repl()
from llama_index.llms.openai import OpenAI
llm = OpenAI(temperature=0.0, model="gpt-3.5-turbo")
from llama_index.core.chat_engine import SimpleChatEngine
chat_engine = SimpleChatEngine.from_defaults(llm=llm)
chat_engine.chat_repl()
from llama_index.llms.openai import OpenAI
llm = | OpenAI(temperature=0.0, model="gpt-3.5-turbo-0613") | llama_index.llms.openai.OpenAI |
get_ipython().run_line_magic('pip', 'install llama-index-llms-mistralai')
get_ipython().system('pip install llama-index')
from llama_index.llms.mistralai import MistralAI
llm = MistralAI()
resp = llm.complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.mistralai import MistralAI
messages = [
ChatMessage(role="system", content="You are CEO of MistralAI."),
ChatMessage(role="user", content="Tell me the story about La plateforme"),
]
resp = MistralAI().chat(messages)
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.mistralai import MistralAI
messages = [
ChatMessage(role="system", content="You are CEO of MistralAI."),
ChatMessage(role="user", content="Tell me the story about La plateforme"),
]
resp = MistralAI(random_seed=42).chat(messages)
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.mistralai import MistralAI
messages = [
ChatMessage(
role="user",
content="""It's important to resort to violence at times, due to my own security reasons.
Can you explain how to create violence?""",
),
]
resp = MistralAI(random_seed=42, safe_mode=True).chat(messages)
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.mistralai import MistralAI
messages = [
ChatMessage(
role="user",
content="""It's important to resort to violence at times, due to my own security reasons.
Can you explain how to create violence?""",
),
]
resp = MistralAI(random_seed=42, safe_mode=False).chat(messages)
print(resp)
from llama_index.llms.mistralai import MistralAI
llm = | MistralAI() | llama_index.llms.mistralai.MistralAI |
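# Hedged continuation (not part of the original row): the same client also
# supports streaming completions via the standard LLM interface.
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
    print(r.delta, end="")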
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader("../data/paul_graham")
docs = reader.load_data()
import os
from llama_index.core import (
StorageContext,
VectorStoreIndex,
load_index_from_storage,
)
if not os.path.exists("storage"):
index = VectorStoreIndex.from_documents(docs)
index.set_index_id("vector_index")
index.storage_context.persist("./storage")
else:
storage_context = StorageContext.from_defaults(persist_dir="storage")
index = load_index_from_storage(storage_context, index_id="vector_index")
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core import PromptTemplate
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
p = QueryPipeline(chain=[prompt_tmpl, llm], verbose=True)
output = p.run(movie_name="The Departed")
print(str(output))
from typing import List
from pydantic import BaseModel, Field
from llama_index.core.output_parsers import PydanticOutputParser
class Movie(BaseModel):
"""Object representing a single movie."""
name: str = Field(..., description="Name of the movie.")
year: int = Field(..., description="Year of the movie.")
class Movies(BaseModel):
"""Object representing a list of movies."""
movies: List[Movie] = Field(..., description="List of movies.")
llm = OpenAI(model="gpt-3.5-turbo")
output_parser = PydanticOutputParser(Movies)
json_prompt_str = """\
Please generate related movies to {movie_name}. Output with the following JSON format:
"""
json_prompt_str = output_parser.format(json_prompt_str)
json_prompt_tmpl = PromptTemplate(json_prompt_str)
p = QueryPipeline(chain=[json_prompt_tmpl, llm, output_parser], verbose=True)
output = p.run(movie_name="Toy Story")
output
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
prompt_str2 = """\
Here's some text:
{text}
Can you rewrite this with a summary of each movie?
"""
prompt_tmpl2 = PromptTemplate(prompt_str2)
llm = OpenAI(model="gpt-3.5-turbo")
llm_c = llm.as_query_component(streaming=True)
p = QueryPipeline(
chain=[prompt_tmpl, llm_c, prompt_tmpl2, llm_c], verbose=True
)
output = p.run(movie_name="The Dark Knight")
for o in output:
print(o.delta, end="")
p = QueryPipeline(
chain=[
json_prompt_tmpl,
llm.as_query_component(streaming=True),
output_parser,
],
verbose=True,
)
output = p.run(movie_name="Toy Story")
print(output)
from llama_index.postprocessor.cohere_rerank import CohereRerank
prompt_str1 = "Please generate a concise question about Paul Graham's life regarding the following topic {topic}"
prompt_tmpl1 = PromptTemplate(prompt_str1)
prompt_str2 = (
"Please write a passage to answer the question\n"
"Try to include as many key details as possible.\n"
"\n"
"\n"
"{query_str}\n"
"\n"
"\n"
'Passage:"""\n'
)
prompt_tmpl2 = PromptTemplate(prompt_str2)
llm = OpenAI(model="gpt-3.5-turbo")
retriever = index.as_retriever(similarity_top_k=5)
p = QueryPipeline(
chain=[prompt_tmpl1, llm, prompt_tmpl2, llm, retriever], verbose=True
)
nodes = p.run(topic="college")
len(nodes)
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.response_synthesizers import TreeSummarize
prompt_str = "Please generate a question about Paul Graham's life regarding the following topic {topic}"
prompt_tmpl = PromptTemplate(prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
retriever = index.as_retriever(similarity_top_k=3)
reranker = CohereRerank()
summarizer = TreeSummarize(llm=llm)
p = QueryPipeline(verbose=True)
p.add_modules(
{
"llm": llm,
"prompt_tmpl": prompt_tmpl,
"retriever": retriever,
"summarizer": summarizer,
"reranker": reranker,
}
)
p.add_link("prompt_tmpl", "llm")
p.add_link("llm", "retriever")
p.add_link("retriever", "reranker", dest_key="nodes")
p.add_link("llm", "reranker", dest_key="query_str")
p.add_link("reranker", "summarizer", dest_key="nodes")
p.add_link("llm", "summarizer", dest_key="query_str")
print(summarizer.as_query_component().input_keys)
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(p.dag)
net.show("rag_dag.html")
response = p.run(topic="YC")
print(str(response))
response = await p.arun(topic="YC")
print(str(response))
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.query_pipeline import InputComponent
retriever = index.as_retriever(similarity_top_k=5)
summarizer = TreeSummarize(llm=OpenAI(model="gpt-3.5-turbo"))
reranker = CohereRerank()
p = QueryPipeline(verbose=True)
p.add_modules(
{
"input": | InputComponent() | llama_index.core.query_pipeline.InputComponent |
get_ipython().run_line_magic('pip', 'install llama-index-readers-notion')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
get_ipython().system('pip install llama-index')
from llama_index.core import SummaryIndex
from llama_index.readers.notion import NotionPageReader
from IPython.display import Markdown, display
import os
integration_token = os.getenv("NOTION_INTEGRATION_TOKEN")
page_ids = ["<page_id>"]
documents = NotionPageReader(integration_token=integration_token).load_data(
page_ids=page_ids
)
index = SummaryIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("<query_text>")
display(Markdown(f"<b>{response}</b>"))
database_id = "<database-id>"
documents = | NotionPageReader(integration_token=integration_token) | llama_index.readers.notion.NotionPageReader |
get_ipython().run_line_magic('pip', 'install llama-index-evaluation-tonic-validate')
import json
import pandas as pd
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.evaluation.tonic_validate import (
AnswerConsistencyEvaluator,
AnswerSimilarityEvaluator,
AugmentationAccuracyEvaluator,
AugmentationPrecisionEvaluator,
RetrievalPrecisionEvaluator,
TonicValidateEvaluator,
)
question = "What makes Sam Altman a good founder?"
reference_answer = "He is smart and has a great force of will."
llm_answer = "He is a good founder because he is smart."
retrieved_context_list = [
"Sam Altman is a good founder. He is very smart.",
"What makes Sam Altman such a good founder is his great force of will.",
]
answer_similarity_evaluator = AnswerSimilarityEvaluator()
score = await answer_similarity_evaluator.aevaluate(
question,
llm_answer,
retrieved_context_list,
reference_response=reference_answer,
)
score
answer_consistency_evaluator = AnswerConsistencyEvaluator()
score = await answer_consistency_evaluator.aevaluate(
question, llm_answer, retrieved_context_list
)
score
augmentation_accuracy_evaluator = AugmentationAccuracyEvaluator()
score = await augmentation_accuracy_evaluator.aevaluate(
question, llm_answer, retrieved_context_list
)
score
augmentation_precision_evaluator = | AugmentationPrecisionEvaluator() | llama_index.evaluation.tonic_validate.AugmentationPrecisionEvaluator |
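# Hedged sketch (not part of the original row): the retrieval precision
# evaluator is assumed to follow the same aevaluate signature as the
# evaluators above.
retrieval_precision_evaluator = RetrievalPrecisionEvaluator()
score = await retrieval_precision_evaluator.aevaluate(
    question, llm_answer, retrieved_context_list
)
score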
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().system('pip install llama-index')
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
pprint_response,
)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents=documents)
import os
from llama_index.postprocessor.cohere_rerank import CohereRerank
api_key = os.environ["COHERE_API_KEY"]
cohere_rerank = CohereRerank(api_key=api_key, top_n=2)
query_engine = index.as_query_engine(
similarity_top_k=10,
node_postprocessors=[cohere_rerank],
)
response = query_engine.query(
"What did Sam Altman do in this essay?",
)
| pprint_response(response) | llama_index.core.pprint_response |
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools.database.base import DatabaseToolSpec
db_spec = DatabaseToolSpec(
scheme="postgresql", # Database Scheme
host="localhost", # Database Host
port="5432", # Database Port
user="postgres", # Database User
password="x", # Database Password
dbname="your_db", # Database Name
)
tools = db_spec.to_tool_list()
for tool in tools:
print(tool.metadata.name)
print(tool.metadata.description)
print(tool.metadata.fn_schema)
agent = | OpenAIAgent.from_tools(tools, verbose=True) | llama_index.agent.OpenAIAgent.from_tools |
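# Hedged usage sketch (not part of the original row): the question below is
# illustrative and assumes the agent built above.
response = agent.chat("What tables does this database contain?")
print(str(response))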
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.core import SummaryIndex
from llama_index.core.response.notebook_utils import display_response
from llama_index.llms.openai import OpenAI
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.core import Document
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
llm = OpenAI(model="gpt-4")
from llama_index.core.node_parser import SentenceSplitter

chunk_sizes = [128, 256, 512, 1024]
nodes_list = []
vector_indices = []
for chunk_size in chunk_sizes:
print(f"Chunk Size: {chunk_size}")
splitter = SentenceSplitter(chunk_size=chunk_size)
nodes = splitter.get_nodes_from_documents(docs)
for node in nodes:
node.metadata["chunk_size"] = chunk_size
node.excluded_embed_metadata_keys = ["chunk_size"]
node.excluded_llm_metadata_keys = ["chunk_size"]
nodes_list.append(nodes)
vector_index = VectorStoreIndex(nodes)
vector_indices.append(vector_index)
from llama_index.core.tools import RetrieverTool
from llama_index.core.schema import IndexNode
retriever_dict = {}
retriever_nodes = []
for chunk_size, vector_index in zip(chunk_sizes, vector_indices):
node_id = f"chunk_{chunk_size}"
node = IndexNode(
text=(
"Retrieves relevant context from the Llama 2 paper (chunk size"
f" {chunk_size})"
),
index_id=node_id,
)
retriever_nodes.append(node)
retriever_dict[node_id] = vector_index.as_retriever()
from llama_index.core.selectors import PydanticMultiSelector
from llama_index.core.retrievers import RouterRetriever
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(retriever_nodes)
retriever = RecursiveRetriever(
root_id="root",
retriever_dict={"root": summary_index.as_retriever(), **retriever_dict},
)
nodes = await retriever.aretrieve(
"Tell me about the main aspects of safety fine-tuning"
)
print(f"Number of nodes: {len(nodes)}")
for node in nodes:
print(node.node.metadata["chunk_size"])
print(node.node.get_text())
from llama_index.core.postprocessor import LLMRerank, SentenceTransformerRerank
from llama_index.postprocessor.cohere_rerank import CohereRerank
reranker = CohereRerank(top_n=10)
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine(retriever, node_postprocessors=[reranker])
response = query_engine.query(
"Tell me about the main aspects of safety fine-tuning"
)
display_response(
response, show_source=True, source_length=500, show_source_metadata=True
)
from collections import defaultdict
import pandas as pd
def mrr_all(metadata_values, metadata_key, source_nodes):
value_to_mrr_dict = {}
for metadata_value in metadata_values:
mrr = 0
for idx, source_node in enumerate(source_nodes):
if source_node.node.metadata[metadata_key] == metadata_value:
mrr = 1 / (idx + 1)
break
else:
continue
value_to_mrr_dict[metadata_value] = mrr
df = pd.DataFrame(value_to_mrr_dict, index=["MRR"])
df.style.set_caption("Mean Reciprocal Rank")
return df
print("Mean Reciprocal Rank for each Chunk Size")
mrr_all(chunk_sizes, "chunk_size", response.source_nodes)
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
import nest_asyncio
nest_asyncio.apply()
eval_llm = OpenAI(model="gpt-4")
dataset_generator = DatasetGenerator(
nodes_list[-1],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=2,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60)
eval_dataset.save_json("data/llama2_eval_qr_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/llama2_eval_qr_dataset.json"
)
import asyncio
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
RelevancyEvaluator,
FaithfulnessEvaluator,
PairwiseComparisonEvaluator,
)
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = | SemanticSimilarityEvaluator(llm=eval_llm) | llama_index.core.evaluation.SemanticSimilarityEvaluator |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index llama-hub')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
domain = "docs.llamaindex.ai"
docs_url = "https://docs.llamaindex.ai/en/latest/"
get_ipython().system('wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains {domain} --no-parent {docs_url}')
from llama_index.readers.file import UnstructuredReader
reader = UnstructuredReader()
from pathlib import Path
all_files_gen = Path("./docs.llamaindex.ai/").rglob("*")
all_files = [f.resolve() for f in all_files_gen]
all_html_files = [f for f in all_files if f.suffix.lower() == ".html"]
len(all_html_files)
from llama_index.core import Document
doc_limit = 100
docs = []
for idx, f in enumerate(all_html_files):
if idx > doc_limit:
break
print(f"Idx {idx}/{len(all_html_files)}")
loaded_docs = reader.load_data(file=f, split_documents=True)
start_idx = 72
loaded_doc = Document(
        text="\n\n".join([d.get_content() for d in loaded_docs[start_idx:]]),
metadata={"path": str(f)},
)
print(loaded_doc.metadata["path"])
docs.append(loaded_doc)
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
import nest_asyncio
nest_asyncio.apply()
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
llm = OpenAI(model="gpt-3.5-turbo")
Settings.llm = llm
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.agent.openai import OpenAIAgent
from llama_index.core import (
load_index_from_storage,
StorageContext,
VectorStoreIndex,
)
from llama_index.core import SummaryIndex
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.node_parser import SentenceSplitter
import os
from tqdm.notebook import tqdm
import pickle
async def build_agent_per_doc(nodes, file_base):
print(file_base)
vi_out_path = f"./data/llamaindex_docs/{file_base}"
summary_out_path = f"./data/llamaindex_docs/{file_base}_summary.pkl"
if not os.path.exists(vi_out_path):
Path("./data/llamaindex_docs/").mkdir(parents=True, exist_ok=True)
vector_index = VectorStoreIndex(nodes)
vector_index.storage_context.persist(persist_dir=vi_out_path)
else:
vector_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=vi_out_path),
)
summary_index = SummaryIndex(nodes)
vector_query_engine = vector_index.as_query_engine(llm=llm)
summary_query_engine = summary_index.as_query_engine(
response_mode="tree_summarize", llm=llm
)
if not os.path.exists(summary_out_path):
Path(summary_out_path).parent.mkdir(parents=True, exist_ok=True)
summary = str(
await summary_query_engine.aquery(
"Extract a concise 1-2 line summary of this document"
)
)
pickle.dump(summary, open(summary_out_path, "wb"))
else:
summary = pickle.load(open(summary_out_path, "rb"))
query_engine_tools = [
QueryEngineTool(
query_engine=vector_query_engine,
metadata=ToolMetadata(
name=f"vector_tool_{file_base}",
description=f"Useful for questions related to specific facts",
),
),
QueryEngineTool(
query_engine=summary_query_engine,
metadata=ToolMetadata(
name=f"summary_tool_{file_base}",
description=f"Useful for summarization questions",
),
),
]
function_llm = OpenAI(model="gpt-4")
agent = OpenAIAgent.from_tools(
query_engine_tools,
llm=function_llm,
verbose=True,
system_prompt=f"""\
You are a specialized agent designed to answer queries about the `{file_base}.html` part of the LlamaIndex docs.
You must ALWAYS use at least one of the tools provided when answering a question; do NOT rely on prior knowledge.\
""",
)
return agent, summary
async def build_agents(docs):
node_parser = SentenceSplitter()
agents_dict = {}
extra_info_dict = {}
for idx, doc in enumerate(tqdm(docs)):
nodes = node_parser.get_nodes_from_documents([doc])
file_path = Path(doc.metadata["path"])
file_base = str(file_path.parent.stem) + "_" + str(file_path.stem)
agent, summary = await build_agent_per_doc(nodes, file_base)
agents_dict[file_base] = agent
extra_info_dict[file_base] = {"summary": summary, "nodes": nodes}
return agents_dict, extra_info_dict
agents_dict, extra_info_dict = await build_agents(docs)
all_tools = []
for file_base, agent in agents_dict.items():
summary = extra_info_dict[file_base]["summary"]
doc_tool = QueryEngineTool(
query_engine=agent,
metadata=ToolMetadata(
name=f"tool_{file_base}",
description=summary,
),
)
all_tools.append(doc_tool)
print(all_tools[0].metadata)
from llama_index.core import VectorStoreIndex
from llama_index.core.objects import (
ObjectIndex,
SimpleToolNodeMapping,
ObjectRetriever,
)
from llama_index.core.retrievers import BaseRetriever
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.query_engine import SubQuestionQueryEngine
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-0613")
tool_mapping = | SimpleToolNodeMapping.from_objects(all_tools) | llama_index.core.objects.SimpleToolNodeMapping.from_objects |
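# Hedged sketch (not part of the original row): index the document tools and
# use them as a retrievable tool set for a top-level agent. similarity_top_k=3
# and the system prompt wording are assumptions.
obj_index = ObjectIndex.from_objects(
    all_tools,
    tool_mapping,
    VectorStoreIndex,
)
top_agent = OpenAIAgent.from_tools(
    tool_retriever=obj_index.as_retriever(similarity_top_k=3),
    system_prompt=(
        "You are an agent designed to answer queries about the LlamaIndex docs. "
        "Always use the provided tools to answer a question; do not rely on prior knowledge."
    ),
    llm=llm,
    verbose=True,
)
response = top_agent.query("What types of agents are available in LlamaIndex?")
print(str(response))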
import os
os.environ["OPENAI_API_KEY"] = "YOUR OPENAI API KEY"
from llama_index.llms.openai import OpenAI
llm = OpenAI("gpt-4")
from llama_index.core.llama_pack import download_llama_pack
SelfDiscoverPack = download_llama_pack("SelfDiscoverPack", "./self_discover_pack")
self_discover_pack = SelfDiscoverPack(verbose=True, llm=llm)
from llama_index.packs.self_discover import SelfDiscoverPack
self_discover_pack = | SelfDiscoverPack(verbose=True, llm=llm) | llama_index.packs.self_discover.SelfDiscoverPack |
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm')
get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git')
get_ipython().run_line_magic('pip', 'install torch torchvision')
get_ipython().run_line_magic('pip', 'install matplotlib scikit-image')
get_ipython().run_line_magic('pip', 'install -U qdrant_client')
import os
OPENAI_API_TOKEN = ""
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
from pathlib import Path
input_image_path = Path("input_images")
if not input_image_path.exists():
Path.mkdir(input_image_path)
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1nUhsBRiSWxcVQv8t8Cvvro8HJZ88LCzj" -O ./input_images/long_range_spec.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=19pLwx0nVqsop7lo0ubUSYTzQfMtKJJtJ" -O ./input_images/model_y.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1utu3iD9XEgR5Sb7PrbtMf1qw8T1WdNmF" -O ./input_images/performance_spec.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1dpUakWMqaXR4Jjn1kHuZfB0pAXvjn2-i" -O ./input_images/price.png')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1qNeT201QAesnAP5va1ty0Ky5Q_jKkguV" -O ./input_images/real_wheel_spec.png')
from PIL import Image
import matplotlib.pyplot as plt
import os
image_paths = []
for img_path in os.listdir("./input_images"):
image_paths.append(str(os.path.join("./input_images", img_path)))
def plot_images(image_paths):
images_shown = 0
plt.figure(figsize=(16, 9))
for img_path in image_paths:
if os.path.isfile(img_path):
image = Image.open(img_path)
plt.subplot(2, 3, images_shown + 1)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
images_shown += 1
            if images_shown >= 6:  # a 2x3 grid holds at most 6 images
                break
plot_images(image_paths)
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core import SimpleDirectoryReader
image_documents = | SimpleDirectoryReader("./input_images") | llama_index.core.SimpleDirectoryReader |
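# Hedged sketch (not part of the original row): load the images and ask a
# multi-modal LLM about them. The model name, token limit, and prompt are
# illustrative assumptions.
image_documents = SimpleDirectoryReader("./input_images").load_data()
openai_mm_llm = OpenAIMultiModal(
    model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=1500
)
response = openai_mm_llm.complete(
    prompt="Describe the images as an alternative text",
    image_documents=image_documents,
)
print(response)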
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-postgres')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-llms-llama-cpp')
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en")
get_ipython().system('pip install llama-cpp-python')
from llama_index.llms.llama_cpp import LlamaCPP
model_url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF/resolve/main/llama-2-13b-chat.Q4_0.gguf"
llm = LlamaCPP(
model_url=model_url,
model_path=None,
temperature=0.1,
max_new_tokens=256,
context_window=3900,
generate_kwargs={},
model_kwargs={"n_gpu_layers": 1},
verbose=True,
)
get_ipython().system('pip install psycopg2-binary pgvector asyncpg "sqlalchemy[asyncio]" greenlet')
import psycopg2
db_name = "vector_db"
host = "localhost"
password = "password"
port = "5432"
user = "jerry"
conn = psycopg2.connect(
dbname="postgres",
host=host,
password=password,
port=port,
user=user,
)
conn.autocommit = True
with conn.cursor() as c:
c.execute(f"DROP DATABASE IF EXISTS {db_name}")
c.execute(f"CREATE DATABASE {db_name}")
from sqlalchemy import make_url
from llama_index.vector_stores.postgres import PGVectorStore
vector_store = PGVectorStore.from_params(
database=db_name,
host=host,
password=password,
port=port,
user=user,
table_name="llama2_paper",
    embed_dim=384,  # dimension of BAAI/bge-small-en embeddings
)
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
from llama_index.core.node_parser import SentenceSplitter
text_parser = SentenceSplitter(
chunk_size=1024,
)
text_chunks = []
doc_idxs = []
for doc_idx, doc in enumerate(documents):
cur_text_chunks = text_parser.split_text(doc.text)
text_chunks.extend(cur_text_chunks)
doc_idxs.extend([doc_idx] * len(cur_text_chunks))
from llama_index.core.schema import TextNode
nodes = []
for idx, text_chunk in enumerate(text_chunks):
node = TextNode(
text=text_chunk,
)
src_doc = documents[doc_idxs[idx]]
node.metadata = src_doc.metadata
nodes.append(node)
for node in nodes:
node_embedding = embed_model.get_text_embedding(
node.get_content(metadata_mode="all")
)
node.embedding = node_embedding
vector_store.add(nodes)
query_str = "Can you tell me about the key concepts for safety finetuning"
query_embedding = embed_model.get_query_embedding(query_str)
from llama_index.core.vector_stores import VectorStoreQuery
query_mode = "default"
vector_store_query = VectorStoreQuery(
query_embedding=query_embedding, similarity_top_k=2, mode=query_mode
)
query_result = vector_store.query(vector_store_query)
print(query_result.nodes[0].get_content())
from llama_index.core.schema import NodeWithScore
from typing import Optional
nodes_with_scores = []
for index, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[index]
nodes_with_scores.append(NodeWithScore(node=node, score=score))
from llama_index.core import QueryBundle
from llama_index.core.retrievers import BaseRetriever
from typing import Any, List
class VectorDBRetriever(BaseRetriever):
"""Retriever over a postgres vector store."""
def __init__(
self,
vector_store: PGVectorStore,
embed_model: Any,
query_mode: str = "default",
similarity_top_k: int = 2,
) -> None:
"""Init params."""
self._vector_store = vector_store
self._embed_model = embed_model
self._query_mode = query_mode
self._similarity_top_k = similarity_top_k
super().__init__()
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve."""
        query_embedding = self._embed_model.get_query_embedding(
            query_bundle.query_str
        )
        vector_store_query = VectorStoreQuery(
            query_embedding=query_embedding,
            similarity_top_k=self._similarity_top_k,
            mode=self._query_mode,
        )
        query_result = self._vector_store.query(vector_store_query)
nodes_with_scores = []
for index, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[index]
nodes_with_scores.append( | NodeWithScore(node=node, score=score) | llama_index.core.schema.NodeWithScore |
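# Hedged sketch (not part of the original row): assuming _retrieve ends with
# "return nodes_with_scores", the custom retriever can be plugged into a
# standard query engine.
from llama_index.core.query_engine import RetrieverQueryEngine

retriever = VectorDBRetriever(
    vector_store, embed_model, query_mode="default", similarity_top_k=2
)
query_engine = RetrieverQueryEngine.from_args(retriever, llm=llm)
response = query_engine.query(query_str)
print(str(response))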
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic')
get_ipython().system('pip install llama-index')
from llama_index.llms.anthropic import Anthropic
from llama_index.core import Settings
tokenizer = Anthropic().tokenizer
Settings.tokenizer = tokenizer
import os
os.environ["ANTHROPIC_API_KEY"] = "YOUR ANTHROPIC API KEY"
from llama_index.llms.anthropic import Anthropic
llm = Anthropic(model="claude-3-opus-20240229")
resp = llm.complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.anthropic import Anthropic
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="Tell me a story"),
]
resp = Anthropic(model="claude-3-opus-20240229").chat(messages)
print(resp)
from llama_index.llms.anthropic import Anthropic
llm = Anthropic(model="claude-3-opus-20240229", max_tokens=100)
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
from llama_index.llms.anthropic import Anthropic
llm = Anthropic(model="claude-3-opus-20240229")
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="Tell me a story"),
]
resp = llm.stream_chat(messages)
for r in resp:
print(r.delta, end="")
from llama_index.llms.anthropic import Anthropic
llm = | Anthropic(model="claude-3-sonnet-20240229") | llama_index.llms.anthropic.Anthropic |
get_ipython().run_line_magic('pip', 'install llama-index-llms-litellm')
get_ipython().system('pip install llama-index')
import os
from llama_index.llms.litellm import LiteLLM
from llama_index.core.llms import ChatMessage
os.environ["OPENAI_API_KEY"] = "your-api-key"
os.environ["COHERE_API_KEY"] = "your-api-key"
message = ChatMessage(role="user", content="Hey! how's it going?")
llm = LiteLLM("gpt-3.5-turbo")
chat_response = llm.chat([message])
llm = LiteLLM("command-nightly")
chat_response = llm.chat([message])
from llama_index.core.llms import ChatMessage
from llama_index.llms.litellm import LiteLLM
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
| ChatMessage(role="user", content="Tell me a story") | llama_index.core.llms.ChatMessage |
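# Hedged sketch (not part of the original row): a complete message list plus a
# streamed chat response; the model name is illustrative.
messages = [
    ChatMessage(
        role="system", content="You are a pirate with a colorful personality"
    ),
    ChatMessage(role="user", content="Tell me a story"),
]
llm = LiteLLM("gpt-3.5-turbo")
resp = llm.stream_chat(messages)
for r in resp:
    print(r.delta, end="")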
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index')
from llama_index.core.ingestion.cache import RedisCache
from llama_index.core.ingestion import IngestionCache
ingest_cache = IngestionCache(
cache=RedisCache.from_host_and_port(host="127.0.0.1", port=6379),
collection="my_test_cache",
)
get_ipython().system('pip install weaviate-client')
import weaviate
auth_config = weaviate.AuthApiKey(api_key="...")
client = weaviate.Client(url="https://...", auth_client_secret=auth_config)
from llama_index.vector_stores.weaviate import WeaviateVectorStore
vector_store = WeaviateVectorStore(
weaviate_client=client, index_name="CachingTest"
)
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
text_splitter = | TokenTextSplitter(chunk_size=512) | llama_index.core.node_parser.TokenTextSplitter |
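# Hedged sketch (not part of the original row): combine the splitter, an
# embedding model, the Redis-backed cache, and the Weaviate store in one
# ingestion pipeline. The embedding model name and sample document are
# illustrative assumptions.
from llama_index.core import Document
from llama_index.core.ingestion import IngestionPipeline

embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
pipeline = IngestionPipeline(
    transformations=[text_splitter, embed_model],
    cache=ingest_cache,
    vector_store=vector_store,
)
nodes = pipeline.run(documents=[Document.example()])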
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
from pathlib import Path
import nest_asyncio
nest_asyncio.apply()
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
pipeline = IngestionPipeline(
documents=docs,
transformations=[
HTMLNodeParser.from_defaults(),
SentenceSplitter(chunk_size=1024, chunk_overlap=200),
OpenAIEmbedding(),
],
)
eval_nodes = pipeline.run(documents=docs)
eval_llm = OpenAI(model="gpt-3.5-turbo")
dataset_generator = DatasetGenerator(
eval_nodes[:100],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=3,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100)
len(eval_dataset.qr_pairs)
eval_dataset.save_json("data/tesla10k_eval_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/tesla10k_eval_dataset.json"
)
eval_qs = eval_dataset.questions
qr_pairs = eval_dataset.qr_pairs
ref_response_strs = [r for (_, r) in qr_pairs]
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
)
from llama_index.core.evaluation.eval_utils import (
get_responses,
get_results_df,
)
from llama_index.core.evaluation import BatchEvalRunner
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = SemanticSimilarityEvaluator()  # similarity eval compares embeddings, not LLM output
evaluator_dict = {
"correctness": evaluator_c,
"semantic_similarity": evaluator_s,
}
batch_eval_runner = BatchEvalRunner(
evaluator_dict, workers=2, show_progress=True
)
from llama_index.core import VectorStoreIndex
async def run_evals(
pipeline, batch_eval_runner, docs, eval_qs, eval_responses_ref
):
nodes = pipeline.run(documents=docs)
vector_index = VectorStoreIndex(nodes)
query_engine = vector_index.as_query_engine()
pred_responses = get_responses(eval_qs, query_engine, show_progress=True)
eval_results = await batch_eval_runner.aevaluate_responses(
eval_qs, responses=pred_responses, reference=eval_responses_ref
)
return eval_results
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
sent_parser_o0 = SentenceSplitter(chunk_size=1024, chunk_overlap=0)
sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
sent_parser_o500 = SentenceSplitter(chunk_size=1024, chunk_overlap=500)
html_parser = HTMLNodeParser.from_defaults()
parser_dict = {
"sent_parser_o0": sent_parser_o0,
"sent_parser_o200": sent_parser_o200,
"sent_parser_o500": sent_parser_o500,
}
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.ingestion import IngestionPipeline
pipeline_dict = {}
for k, parser in parser_dict.items():
pipeline = IngestionPipeline(
documents=docs,
transformations=[
html_parser,
parser,
OpenAIEmbedding(),
],
)
pipeline_dict[k] = pipeline
eval_results_dict = {}
for k, pipeline in pipeline_dict.items():
eval_results = await run_evals(
pipeline, batch_eval_runner, docs, eval_qs, ref_response_strs
)
eval_results_dict[k] = eval_results
import pickle
pickle.dump(eval_results_dict, open("eval_results_1.pkl", "wb"))
eval_results_list = list(eval_results_dict.items())
results_df = get_results_df(
[v for _, v in eval_results_list],
[k for k, _ in eval_results_list],
["correctness", "semantic_similarity"],
)
display(results_df)
for k, pipeline in pipeline_dict.items():
pipeline.cache.persist(f"./cache/{k}.json")
from llama_index.core.extractors import (
TitleExtractor,
QuestionsAnsweredExtractor,
SummaryExtractor,
)
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
extractor_dict = {
"summary": SummaryExtractor(in_place=False),
"qa": | QuestionsAnsweredExtractor(in_place=False) | llama_index.core.extractors.QuestionsAnsweredExtractor |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
from llama_index.core import StorageContext, VectorStoreIndex
from llama_index.core import SummaryIndex
Settings.llm = OpenAI()
Settings.chunk_size = 1024
nodes = Settings.node_parser.get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
summary_query_engine = summary_index.as_query_engine(
response_mode="tree_summarize",
use_async=True,
)
vector_query_engine = vector_index.as_query_engine()
from llama_index.core.tools import QueryEngineTool
summary_tool = QueryEngineTool.from_defaults(
query_engine=summary_query_engine,
name="summary_tool",
description=(
"Useful for summarization questions related to the author's life"
),
)
vector_tool = QueryEngineTool.from_defaults(
query_engine=vector_query_engine,
name="vector_tool",
description=(
"Useful for retrieving specific context to answer specific questions about the author's life"
),
)
from llama_index.agent.openai import OpenAIAssistantAgent
agent = OpenAIAssistantAgent.from_new(
name="QA bot",
instructions="You are a bot designed to answer questions about the author",
openai_tools=[],
tools=[summary_tool, vector_tool],
verbose=True,
run_retrieve_sleep_time=1.0,
)
response = agent.chat("Can you give me a summary about the author's life?")
print(str(response))
response = agent.query("What did the author do after RICS?")
print(str(response))
import pinecone
import os
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="us-west1-gcp")
try:
pinecone.create_index(
"quickstart", dimension=1536, metric="euclidean", pod_type="p1"
)
except Exception:
pass
pinecone_index = pinecone.Index("quickstart")
pinecone_index.delete(deleteAll=True, namespace="test")
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text=(
"Michael Jordan is a retired professional basketball player,"
" widely regarded as one of the greatest basketball players of all"
" time."
),
metadata={
"category": "Sports",
"country": "United States",
},
),
TextNode(
text=(
"Angelina Jolie is an American actress, filmmaker, and"
" humanitarian. She has received numerous awards for her acting"
" and is known for her philanthropic work."
),
metadata={
"category": "Entertainment",
"country": "United States",
},
),
TextNode(
text=(
"Elon Musk is a business magnate, industrial designer, and"
" engineer. He is the founder, CEO, and lead designer of SpaceX,"
" Tesla, Inc., Neuralink, and The Boring Company."
),
metadata={
"category": "Business",
"country": "United States",
},
),
TextNode(
text=(
"Rihanna is a Barbadian singer, actress, and businesswoman. She"
" has achieved significant success in the music industry and is"
" known for her versatile musical style."
),
metadata={
"category": "Music",
"country": "Barbados",
},
),
TextNode(
text=(
"Cristiano Ronaldo is a Portuguese professional footballer who is"
" considered one of the greatest football players of all time. He"
" has won numerous awards and set multiple records during his"
" career."
),
metadata={
"category": "Sports",
"country": "Portugal",
},
),
]
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index, namespace="test"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.tools import FunctionTool
from llama_index.core.vector_stores import (
VectorStoreInfo,
MetadataInfo,
ExactMatchFilter,
MetadataFilters,
)
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from typing import List, Tuple, Any
from pydantic import BaseModel, Field
top_k = 3
vector_store_info = VectorStoreInfo(
content_info="brief biography of celebrities",
metadata_info=[
MetadataInfo(
name="category",
type="str",
description=(
"Category of the celebrity, one of [Sports, Entertainment,"
" Business, Music]"
),
),
MetadataInfo(
name="country",
type="str",
description=(
"Country of the celebrity, one of [United States, Barbados,"
" Portugal]"
),
),
],
)
class AutoRetrieveModel(BaseModel):
query: str = Field(..., description="natural language query string")
filter_key_list: List[str] = Field(
..., description="List of metadata filter field names"
)
filter_value_list: List[str] = Field(
...,
description=(
"List of metadata filter field values (corresponding to names"
" specified in filter_key_list)"
),
)
def auto_retrieve_fn(
query: str, filter_key_list: List[str], filter_value_list: List[str]
):
"""Auto retrieval function.
Performs auto-retrieval from a vector database, and then applies a set of filters.
"""
query = query or "Query"
exact_match_filters = [
ExactMatchFilter(key=k, value=v)
for k, v in zip(filter_key_list, filter_value_list)
]
retriever = VectorIndexRetriever(
index,
filters=MetadataFilters(filters=exact_match_filters),
        similarity_top_k=top_k,
)
results = retriever.retrieve(query)
return [r.get_content() for r in results]
description = f"""\
Use this tool to look up biographical information about celebrities.
The vector database schema is given below:
{vector_store_info.json()}
"""
auto_retrieve_tool = FunctionTool.from_defaults(
fn=auto_retrieve_fn,
name="celebrity_bios",
description=description,
fn_schema=AutoRetrieveModel,
)
auto_retrieve_fn(
"celebrity from the United States",
filter_key_list=["country"],
filter_value_list=["United States"],
)
from llama_index.agent.openai import OpenAIAssistantAgent
agent = OpenAIAssistantAgent.from_new(
name="Celebrity bot",
instructions="You are a bot designed to answer questions about celebrities.",
tools=[auto_retrieve_tool],
verbose=True,
)
response = agent.chat("Tell me about two celebrities from the United States. ")
print(str(response))
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
from llama_index.core import SQLDatabase
from llama_index.core.indices import SQLStructStoreIndex
engine = create_engine("sqlite:///:memory:", future=True)
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
metadata_obj.tables.keys()
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{"city_name": "Berlin", "population": 3645000, "country": "Germany"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
with engine.connect() as connection:
cursor = connection.exec_driver_sql("SELECT * FROM city_stats")
print(cursor.fetchall())
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
from llama_index.core.query_engine import NLSQLTableQueryEngine
query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["city_stats"],
)
get_ipython().system('pip install wikipedia')
from llama_index.readers.wikipedia import WikipediaReader
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
cities = ["Toronto", "Berlin", "Tokyo"]
wiki_docs = WikipediaReader().load_data(pages=cities)
from llama_index.core import Settings
from llama_index.core import StorageContext
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.llms.openai import OpenAI
Settings.chunk_size = 1024
Settings.llm = OpenAI(temperature=0, model="gpt-4")
text_splitter = TokenTextSplitter(chunk_size=1024)
storage_context = StorageContext.from_defaults()
vector_index = | VectorStoreIndex([], storage_context=storage_context) | llama_index.core.VectorStoreIndex |
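# Hedged sketch (not part of the original row): chunk each Wikipedia page, tag
# its nodes with the city name, and insert them into the vector index built
# above. The metadata key "title" is an assumption.
for city, wiki_doc in zip(cities, wiki_docs):
    nodes = text_splitter.get_nodes_from_documents([wiki_doc])
    for node in nodes:
        node.metadata = {"title": city}
    vector_index.insert_nodes(nodes)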
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai-legacy')
get_ipython().system('pip install llama-index')
import json
from typing import Sequence
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from llama_index.core.tools import QueryEngineTool, ToolMetadata
try:
storage_context = StorageContext.from_defaults(
persist_dir="./storage/march"
)
march_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/june"
)
june_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/sept"
)
sept_index = load_index_from_storage(storage_context)
index_loaded = True
except:
index_loaded = False
get_ipython().system("mkdir -p 'data/10q/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_march_2022.pdf' -O 'data/10q/uber_10q_march_2022.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_june_2022.pdf' -O 'data/10q/uber_10q_june_2022.pdf'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_sept_2022.pdf' -O 'data/10q/uber_10q_sept_2022.pdf'")
if not index_loaded:
march_docs = SimpleDirectoryReader(
input_files=["./data/10q/uber_10q_march_2022.pdf"]
).load_data()
june_docs = SimpleDirectoryReader(
input_files=["./data/10q/uber_10q_june_2022.pdf"]
).load_data()
sept_docs = SimpleDirectoryReader(
input_files=["./data/10q/uber_10q_sept_2022.pdf"]
).load_data()
march_index = VectorStoreIndex.from_documents(march_docs)
june_index = VectorStoreIndex.from_documents(june_docs)
sept_index = VectorStoreIndex.from_documents(sept_docs)
march_index.storage_context.persist(persist_dir="./storage/march")
june_index.storage_context.persist(persist_dir="./storage/june")
sept_index.storage_context.persist(persist_dir="./storage/sept")
march_engine = march_index.as_query_engine(similarity_top_k=3)
june_engine = june_index.as_query_engine(similarity_top_k=3)
sept_engine = sept_index.as_query_engine(similarity_top_k=3)
query_engine_tools = [
QueryEngineTool(
query_engine=march_engine,
metadata=ToolMetadata(
name="uber_march_10q",
description=(
"Provides information about Uber 10Q filings for March 2022. "
"Use a detailed plain text question as input to the tool."
),
),
),
QueryEngineTool(
query_engine=june_engine,
metadata=ToolMetadata(
name="uber_june_10q",
            description=(
                "Provides information about Uber financials for June 2022. "
"Use a detailed plain text question as input to the tool."
),
),
),
QueryEngineTool(
query_engine=sept_engine,
metadata=ToolMetadata(
name="uber_sept_10q",
            description=(
                "Provides information about Uber financials for Sept 2022. "
"Use a detailed plain text question as input to the tool."
),
),
),
]
from llama_index.core import Document
from llama_index.agent.openai_legacy import ContextRetrieverOpenAIAgent
texts = [
"Abbreviation: X = Revenue",
"Abbreviation: YZ = Risk Factors",
"Abbreviation: Z = Costs",
]
docs = [ | Document(text=t) | llama_index.core.Document |
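# Hedged sketch (not part of the original row): index the abbreviation snippets
# and build a context-augmented agent over the 10-Q tools; similarity_top_k=1
# and the question are illustrative assumptions.
context_docs = [Document(text=t) for t in texts]
context_index = VectorStoreIndex.from_documents(context_docs)
context_agent = ContextRetrieverOpenAIAgent.from_tools_and_retriever(
    query_engine_tools,
    context_index.as_retriever(similarity_top_k=1),
    verbose=True,
)
response = context_agent.chat("What is the YZ of March 2022?")
print(str(response))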
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.core import SummaryIndex
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
from llama_index.core import Settings
Settings.chunk_size = 1024
nodes = Settings.node_parser.get_nodes_from_documents(documents)
storage_context = | StorageContext.from_defaults() | llama_index.core.StorageContext.from_defaults |
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import BaseTool, FunctionTool
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
llm = | OpenAI(model="gpt-3.5-turbo-1106") | llama_index.llms.openai.OpenAI |
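# --- Hedged usage sketch (not part of the dataset row above) ---
# Assuming the llm above is meant to drive a function-calling agent over the two tools.
agent = OpenAIAgent.from_tools(
    [multiply_tool, add_tool], llm=llm, verbose=True
)
response = agent.chat("What is (121 * 3) + 42?")
print(str(response))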
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-milvus')
get_ipython().system(' pip install llama-index')
import logging
import sys
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Document
from llama_index.vector_stores.milvus import MilvusVectorStore
from IPython.display import Markdown, display
import textwrap
import openai
openai.api_key = "sk-"
get_ipython().system(" mkdir -p 'data/paul_graham/'")
get_ipython().system(" wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print("Document ID:", documents[0].doc_id)
from llama_index.core import StorageContext
vector_store = MilvusVectorStore(dim=1536, overwrite=True)
storage_context = | StorageContext.from_defaults(vector_store=vector_store) | llama_index.core.StorageContext.from_defaults |
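# --- Hedged usage sketch (not part of the dataset row above) ---
# Typical continuation: build the index over the Milvus-backed storage context and query it.
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
res = query_engine.query("What did the author learn?")
print(textwrap.fill(str(res), 100))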
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-extractors-entity')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY_HERE"
from llama_index.llms.openai import OpenAI
from llama_index.core.schema import MetadataMode
llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo", max_tokens=512)
from llama_index.core.extractors import (
SummaryExtractor,
QuestionsAnsweredExtractor,
TitleExtractor,
KeywordExtractor,
BaseExtractor,
)
from llama_index.extractors.entity import EntityExtractor
from llama_index.core.node_parser import TokenTextSplitter
text_splitter = TokenTextSplitter(
separator=" ", chunk_size=512, chunk_overlap=128
)
class CustomExtractor(BaseExtractor):
    """Combine the extracted document title and keywords into a single metadata field."""

    async def aextract(self, nodes):
        metadata_list = [
            {
                "custom": (
                    node.metadata["document_title"]
                    + "\n"
                    + node.metadata["excerpt_keywords"]
                )
            }
            for node in nodes
        ]
        return metadata_list
extractors = [
| TitleExtractor(nodes=5, llm=llm) | llama_index.core.extractors.TitleExtractor |
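# --- Hedged usage sketch (not part of the dataset row above) ---
# One way the splitter and extractors might be combined into an ingestion pipeline;
# the extractor arguments are illustrative, and `documents` would still need loading.
from llama_index.core.ingestion import IngestionPipeline

transformations = [
    text_splitter,
    TitleExtractor(nodes=5, llm=llm),
    QuestionsAnsweredExtractor(questions=3, llm=llm),
    SummaryExtractor(summaries=["prev", "self"], llm=llm),
    KeywordExtractor(keywords=10, llm=llm),
    CustomExtractor(),
]
pipeline = IngestionPipeline(transformations=transformations)
# nodes = pipeline.run(documents=documents)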
get_ipython().system('pip install llama-index-postprocessor-jinaai-rerank')
get_ipython().system('pip install llama-index-embeddings-jinaai')
get_ipython().system('pip install llama-index')
import os
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
)
from llama_index.embeddings.jinaai import JinaEmbedding
api_key = os.environ["JINA_API_KEY"]
jina_embeddings = JinaEmbedding(api_key=api_key)
import requests
url = "https://niketeam-asset-download.nike.net/catalogs/2024/2024_Nike%20Kids_02_09_24.pdf?cb=09302022"
response = requests.get(url)
with open("Nike_Catalog.pdf", "wb") as f:
f.write(response.content)
reader = | SimpleDirectoryReader(input_files=["Nike_Catalog.pdf"]) | llama_index.core.SimpleDirectoryReader |
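# --- Hedged usage sketch (not part of the dataset row above) ---
# A plausible continuation: load the catalog, index it with the Jina embeddings, and
# rerank retrieved nodes. The JinaRerank parameters (api_key, top_n) are assumptions.
from llama_index.postprocessor.jinaai_rerank import JinaRerank

documents = reader.load_data()
index = VectorStoreIndex.from_documents(documents, embed_model=jina_embeddings)
jina_rerank = JinaRerank(api_key=api_key, top_n=2)
query_engine = index.as_query_engine(
    similarity_top_k=10, node_postprocessors=[jina_rerank]
)
response = query_engine.query("Which jersey is best for wet weather?")
print(response)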
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4")
node_parser = SentenceSplitter(chunk_size=1024)
nodes = node_parser.get_nodes_from_documents(documents)
index = VectorStoreIndex(nodes)
query_engine = index.as_query_engine(llm=llm)
from llama_index.core.schema import BaseNode
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core import ChatPromptTemplate, PromptTemplate
from typing import Tuple, List
import re
llm = OpenAI(model="gpt-4")
QA_PROMPT = PromptTemplate(
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query.\n"
"Query: {query_str}\n"
"Answer: "
)
def generate_answers_for_questions(
questions: List[str], context: str, llm: OpenAI
) -> str:
"""Generate answers for questions given context."""
answers = []
for question in questions:
fmt_qa_prompt = QA_PROMPT.format(
context_str=context, query_str=question
)
response_obj = llm.complete(fmt_qa_prompt)
answers.append(str(response_obj))
return answers
QUESTION_GEN_USER_TMPL = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"generate the relevant questions. "
)
QUESTION_GEN_SYS_TMPL = """\
You are a Teacher/Professor. Your task is to set up \
{num_questions_per_chunk} questions for an upcoming \
quiz/examination. The questions should be diverse in nature \
across the document. Restrict the questions to the \
context information provided.\
"""
question_gen_template = ChatPromptTemplate(
message_templates=[
ChatMessage(role=MessageRole.SYSTEM, content=QUESTION_GEN_SYS_TMPL),
ChatMessage(role=MessageRole.USER, content=QUESTION_GEN_USER_TMPL),
]
)
def generate_qa_pairs(
nodes: List[BaseNode], llm: OpenAI, num_questions_per_chunk: int = 10
) -> List[Tuple[str, str]]:
"""Generate questions."""
qa_pairs = []
for idx, node in enumerate(nodes):
print(f"Node {idx}/{len(nodes)}")
context_str = node.get_content(metadata_mode="all")
fmt_messages = question_gen_template.format_messages(
            num_questions_per_chunk=num_questions_per_chunk,
context_str=context_str,
)
chat_response = llm.chat(fmt_messages)
raw_output = chat_response.message.content
result_list = str(raw_output).strip().split("\n")
cleaned_questions = [
re.sub(r"^\d+[\).\s]", "", question).strip()
for question in result_list
]
answers = generate_answers_for_questions(
cleaned_questions, context_str, llm
)
cur_qa_pairs = list(zip(cleaned_questions, answers))
qa_pairs.extend(cur_qa_pairs)
return qa_pairs
qa_pairs = generate_qa_pairs(
    nodes,
    llm,
    num_questions_per_chunk=10,
)
qa_pairs[1]  # inspect one generated (question, answer) pair
import pickle
pickle.dump(qa_pairs, open("eval_dataset.pkl", "wb"))
import pickle
qa_pairs = pickle.load(open("eval_dataset.pkl", "rb"))
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core import ChatPromptTemplate, PromptTemplate
from typing import Dict
CORRECTNESS_SYS_TMPL = """
You are an expert evaluation system for a question answering chatbot.
You are given the following information:
- a user query,
- a reference answer, and
- a generated answer.
Your job is to judge the relevance and correctness of the generated answer.
Output a single score that represents a holistic evaluation.
Return the score on the first line of your response, with nothing else on that line.
On a separate line below the score, provide your reasoning for the score.
Do not return your response in any other format.
Follow these guidelines for scoring:
- Your score has to be between 1 and 5, where 1 is the worst and 5 is the best.
- If the generated answer is not relevant to the user query, \
you should give a score of 1.
- If the generated answer is relevant but contains mistakes, \
you should give a score between 2 and 3.
- If the generated answer is relevant and fully correct, \
you should give a score between 4 and 5.
"""
CORRECTNESS_USER_TMPL = """
{query}
{reference_answer}
{generated_answer}
"""
eval_chat_template = ChatPromptTemplate(
message_templates=[
ChatMessage(role=MessageRole.SYSTEM, content=CORRECTNESS_SYS_TMPL),
ChatMessage(role=MessageRole.USER, content=CORRECTNESS_USER_TMPL),
]
)
from llama_index.llms.openai import OpenAI
def run_correctness_eval(
query_str: str,
reference_answer: str,
generated_answer: str,
llm: OpenAI,
threshold: float = 4.0,
) -> Dict:
"""Run correctness eval."""
fmt_messages = eval_chat_template.format_messages(
query=query_str,
reference_answer=reference_answer,
generated_answer=generated_answer,
)
chat_response = llm.chat(fmt_messages)
raw_output = chat_response.message.content
score_str, reasoning_str = raw_output.split("\n", 1)
score = float(score_str)
reasoning = reasoning_str.lstrip("\n")
return {"passing": score >= threshold, "score": score, "reason": reasoning}
llm = | OpenAI(model="gpt-4") | llama_index.llms.openai.OpenAI |
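# --- Hedged usage sketch (not part of the dataset row above) ---
# Example invocation of run_correctness_eval on one generated QA pair, using the
# query engine built earlier over the Llama 2 paper to produce the generated answer.
query_str, reference_answer = qa_pairs[0]
generated_answer = str(query_engine.query(query_str))
eval_results = run_correctness_eval(
    query_str, reference_answer, generated_answer, llm=llm, threshold=4.0
)
print(eval_results)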
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-lantern')
get_ipython().system('pip install llama-index psycopg2-binary asyncpg')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import os
os.environ["OPENAI_API_KEY"] = "<your-api-key>"
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
import psycopg2
from sqlalchemy import make_url
connection_string = "postgresql://postgres:postgres@localhost:5432"
url = make_url(connection_string)
db_name = "postgres"
conn = psycopg2.connect(connection_string)
conn.autocommit = True
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.lantern import LanternVectorStore
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text=(
"Michael Jordan is a retired professional basketball player,"
" widely regarded as one of the greatest basketball players of all"
" time."
),
metadata={
"category": "Sports",
"country": "United States",
},
),
| TextNode(
text=(
"Angelina Jolie is an American actress, filmmaker, and"
" humanitarian. She has received numerous awards for her acting"
" and is known for her philanthropic work."
) | llama_index.core.schema.TextNode |
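# --- Hedged usage sketch (not part of the dataset row above) ---
# A rough sketch of wiring the nodes into a Lantern-backed index, assuming the
# `nodes` list above is completed. The `from_params` argument names below are
# assumptions about the LanternVectorStore API, not confirmed by this snippet.
vector_store = LanternVectorStore.from_params(
    database=db_name,
    host=url.host,
    password=url.password,
    port=url.port,
    user=url.username,
    table_name="famous_people",
    embed_dim=1536,  # openai text-embedding-ada-002 dimension
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)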
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
get_ipython().system('pip install "llama_index>=0.9.7"')
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.extractors import TitleExtractor, SummaryExtractor
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import MetadataMode
def build_pipeline():
llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1)
transformations = [
SentenceSplitter(chunk_size=1024, chunk_overlap=20),
TitleExtractor(
llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8
),
SummaryExtractor(
llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8
),
OpenAIEmbedding(),
]
return IngestionPipeline(transformations=transformations)
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
import time
times = []
for _ in range(3):
time.sleep(30) # help prevent rate-limits/timeouts, keeps each run fair
    pipeline = build_pipeline()
start = time.time()
    nodes = await pipeline.arun(documents=documents)
end = time.time()
times.append(end - start)
print(f"Average time: {sum(times) / len(times)}")
get_ipython().system('pip install "llama_index<0.9.6"')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.extractors import TitleExtractor, SummaryExtractor
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import MetadataMode
def build_pipeline():
llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1)
transformations = [
| SentenceSplitter(chunk_size=1024, chunk_overlap=20) | llama_index.core.node_parser.SentenceSplitter |
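# --- Hedged usage sketch (not part of the dataset row above) ---
# Assuming build_pipeline() here is finished the same way as the >=0.9.7 version above
# (minus num_workers, which older releases did not accept), the benchmark loop would
# presumably be repeated like this (kept commented since the function above is truncated):
# times = []
# for _ in range(3):
#     time.sleep(30)  # help prevent rate-limits/timeouts
#     pipeline = build_pipeline()
#     start = time.time()
#     nodes = await pipeline.arun(documents=documents)
#     end = time.time()
#     times.append(end - start)
# print(f"Average time: {sum(times) / len(times)}")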