Dataset columns: code (string, 161 to 67.2k chars) | apis (sequence, 1 to 24 items) | extract_api (string, 164 to 53.3k chars)
import json
import logging
import os
import re
import time
import xml.etree.ElementTree as ET
from collections import defaultdict, deque
from functools import partial
import backoff
import llama_index
import markdown
import openai
import tiktoken
from colorama import Fore
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
from llama_index import (
Document,
GPTListIndex,
GPTVectorStoreIndex,
LLMPredictor,
ResponseSynthesizer,
ServiceContext,
StorageContext,
VectorStoreIndex,
load_index_from_storage,
)
from llama_index.indices.composability import ComposableGraph
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.retrievers import VectorIndexRetriever
from api.mygene_api import mygene_api
from api.myvariant_api import myvariant_api
from api.pubmed_api import pubmed_api
from config import OPENAI_API_KEY
logging.getLogger("llama_index").setLevel(logging.WARNING)
# file_handler = logging.FileHandler('utils.log')
# Configure the logging settings
# logging.basicConfig(level=logging.INFO, handlers=[file_handler])
MAX_TOKENS = 4097
api_info_mapping = {
"mygene": mygene_api,
"PubMed": pubmed_api,
"myvariant": myvariant_api,
}
api_key = OPENAI_API_KEY or os.environ["OPENAI_API_KEY"]
openai.api_key = api_key
def get_input(prompt, type_=None, min_=None, max_=None, range_=None):
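    """Prompt until the user supplies valid input, optionally coercing the type and enforcing min/max/range constraints."""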
if min_ is not None and max_ is not None and max_ < min_:
raise ValueError("min_ must be less than or equal to max_.")
while True:
ui = input(prompt)
if type_ is not None:
try:
ui = type_(ui)
except ValueError:
print(f"Input type must be {type_.__name__}!")
continue
if max_ is not None and ui > max_:
print(f"Input must be less than or equal to {max_}.")
elif min_ is not None and ui < min_:
print(f"Input must be greater than or equal to {min_}.")
elif range_ is not None and ui not in range_:
if isinstance(range_, range):
template = "Input must be between {} and {}."
print(template.format(range_.start, range_.stop))
else:
template = "Input must be {}."
print(template.format(", ".join(map(str, range_))))
else:
return ui
def select_task(task_list):
# Task list is actually a Queue
task_list = list(task_list)
print("\n\n")
choice = get_input(
Fore.LIGHTGREEN_EX
+ "\033[1mWhich task would you like to execute? Type 0 to create your own task! \033[0m",
type_=int,
min_=0,
max_=len(task_list),
)
if choice == 0:
task = input(Fore.LIGHTGREEN_EX + "\033[1mWrite your task! \033[0m")
else:
task = task_list.pop(choice - 1)
return task, deque(task_list)
def num_tokens_from_string(string: str, encoding_name: str = "gpt2") -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
def get_key_results(index, objective, top_k=20, additional_queries=[]):
"""Run final queries over retrieved documents and store in doc_store."""
if not index.docstore.docs:
print(
Fore.RED
+ "\033[1m\n! WARNING: NO TASKS RETURNED RESULTS. PLEASE TWEAK YOUR OBJECTIVE AND CHECK SPELLING. !\n\033[0m"
)
return []
print(Fore.CYAN + "\033[1m\n*****COMPILING KEY RESULTS*****\n\033[0m")
key_results = []
queries = [
"Give a brief high level summary of all the data.",
"Briefly list all the main points that the data covers.",
"Generate several creative hypotheses given the data.",
"What are some high level research directions to explore further given the data?",
f"Do your best to answer the objective: {objective} given the information.",
]
for query in queries:
print(Fore.CYAN + f"\nCOMPILING RESULT {query}\n")
res = None
try:
res, citation_data = query_knowledge_base(
index=index, query=query, list_index=False, top_k=top_k
)
except Exception as e:
print(f"Exception getting key result {query}, error {e}")
if res:
query = f"## {query}\n\n"
res_html = markdown.markdown(res)
res_citation = markdown.markdown(citation_data)
key_results.append(
(query, f"{res_html}\n\n### Citations\n\n{res_citation}\n\n")
)
print(Fore.CYAN + f"\nRESULTS COMPILED. SAVED TO DIRECTORY `out`\n")
return key_results
def get_max_completion_len(prompt):
tokens = num_tokens_from_string(prompt)
return MAX_TOKENS - tokens
def execute_python(code: str):
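    """Execute a code string via exec() and return the value it assigns to `ret`; returns None if execution raises."""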
# ret is defined in the code string
loc = {}
try:
exec(code, globals(), loc)
except Exception as e:
print(f"Exception executing code {code}, {e}")
return
return loc["ret"]
def process_myvariant_result(results):
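    """Flatten myvariant.info results into (variant text, citation metadata) tuples covering name, affected gene, consequence, CADD score, and rsID."""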
processed_result = []
if not isinstance(results, list):
results = [results]
for result in results:
variant_name = result.get("_id")
gene_affected = result.get("cadd", {}).get("gene", {}).get("genename")
consequence = result.get("cadd", {}).get("consequence")
cadd_score = result.get("cadd", {}).get("phred")
rsid = result.get("dbsnp", {}).get("rsid")
variant_data = ""
citation_data = ""
if variant_name:
variant_data += f"Variant Name: {variant_name}\n"
if gene_affected:
variant_data += f"Gene Affected: {gene_affected}\n"
if consequence:
variant_data += f"Consequence: {consequence}\n"
if cadd_score is not None:
variant_data += f"CADD Score: {cadd_score}\n"
if rsid:
variant_data += f"rsID: {rsid}\n"
processed_result.append((variant_data, {"citation_data": citation_data}))
return processed_result
def process_mygene_result(results):
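    """Convert mygene.info results into (text, citation metadata) tuples: a summary document and, when pathway data exists, a pathway document per gene."""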
processed_result = []
if not isinstance(results, list):
results = [results]
# Each result will be split into 2 documents: summary and pathway
for json_data in results:
name = json_data.get("name")
refseq_genomic = json_data.get("refseq", {}).get("genomic", [])
refseq_rna = json_data.get("refseq", {}).get("rna", [])
symbol = json_data.get("symbol")
taxid = json_data.get("taxid")
type_of_gene = json_data.get("type_of_gene")
pos = json_data.get("genomic_pos_hg19")
summary = json_data.get("summary")
generif = json_data.get("generif")
output_summary = ""
citation_data = ""
# Summary
if name:
output_summary += f"Gene Name: {name}\n"
if refseq_genomic:
output_summary += f"RefSeq genomic: {', '.join(refseq_genomic)}\n"
if refseq_rna:
output_summary += f"RefSeq rna: {', '.join(refseq_rna)}\n"
if symbol:
output_summary += f"Symbol: {symbol}\n"
if taxid:
output_summary += f"Tax ID: {taxid}\n"
if type_of_gene and type_of_gene != "unknown":
output_summary += f"Type of gene: {type_of_gene}\n"
if pos:
output_summary += f"Position: {pos}\n"
if summary:
output_summary += f"Summary of {name}: {summary}\n"
else:
            # If there is no summary, fall back to the generifs.
if generif:
# Take 20 rifs max. Some genes have hundreds of rifs and the results size explodes.
for rif in generif[:20]:
pubmed = rif.get("pubmed")
text = rif.get("text")
if text:
output_summary += text
if pubmed:
citation_data += f" Pubmed ID: {pubmed}"
output_summary = output_summary.strip()
# logging.info(f"Mygene Summary result {name}, length is {str(len(output_summary))}")
if output_summary:
processed_result.append((output_summary, {"citation_data": citation_data}))
# Pathway
pathway = json_data.get("pathway")
if pathway:
kegg = pathway.get("kegg", [])
pid = pathway.get("pid", [])
reactome = pathway.get("reactome", [])
wikipathways = pathway.get("wikipathways", [])
netpath = pathway.get("netpath", [])
biocarta = pathway.get("biocarta", [])
pathway_elements = {
"kegg": kegg,
"pid": pid,
"reactome": reactome,
"wikipathways": wikipathways,
"netpath": netpath,
"biocarta": biocarta,
}
# mygene returns dicts instead of lists if singleton
# Wrap with list if not list
for k, v in pathway_elements.items():
if type(v) is not list:
pathway_elements[k] = [v]
output_pathway = ""
citation_data = ""
if name:
output_pathway += f"Gene Name: {name}\n"
if symbol:
output_pathway += f"Symbol: {symbol}\n"
if taxid:
output_pathway += f"Tax ID: {taxid}\n"
if type_of_gene and type_of_gene != "unknown":
output_pathway += f"Type of gene: {type_of_gene}\n"
if refseq_genomic:
output_pathway += f"RefSeq genomic: {', '.join(refseq_genomic)}\n"
if refseq_rna:
output_pathway += f"RefSeq rna: {', '.join(refseq_rna)}\n"
if pos:
output_pathway += f"Position: {pos}\n"
output_pathway += f"PATHWAYS\n\n"
for k, v in pathway_elements.items():
output_pathway += f"\n{k}:\n"
for item in v:
output_pathway += f" ID: {item.get('id', '')}"
output_pathway += f" Name: {item.get('name', '')}"
# logging.info(f"Mygene Pathway result {name}, length is {len(output_pathway)}")
output_pathway = output_pathway.strip()
if output_pathway:
processed_result.append(
(output_pathway, {"citation_data": citation_data})
)
return processed_result
def process_pubmed_result(result):
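    """Parse PubMed XML into (article text, citation metadata) tuples; if parsing fails, return the raw result unprocessed."""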
try:
root = ET.fromstring(result)
except Exception as e:
print(f"Cannot parse pubmed result, expected xml. {e}")
print("Adding whole document. Note this will lead to suboptimal results.")
return result if isinstance(result, list) else [result]
processed_result = []
for article in root:
res_ = ""
citation_data = ""
for title in article.iter("Title"):
res_ += f"{title.text}\n"
citation_data += f"{title.text}\n"
for abstract in article.iter("AbstractText"):
res_ += f"{abstract.text}\n"
for author in article.iter("Author"):
try:
citation_data += f"{author.find('LastName').text}"
citation_data += f", {author.find('ForeName').text}\n"
except:
pass
for journal in article.iter("Journal"):
res_ += f"{journal.find('Title').text}\n"
citation_data += f"{journal.find('Title').text}\n"
for volume in article.iter("Volume"):
citation_data += f"{volume.text}\n"
for issue in article.iter("Issue"):
citation_data += f"{issue.text}\n"
for pubdate in article.iter("PubDate"):
try:
year = pubdate.find("Year").text
citation_data += f"{year}"
month = pubdate.find("Month").text
citation_data += f"-{month}"
day = pubdate.find("Day").text
citation_data += f"-{day}\n"
except:
pass
for doi in article.iter("ELocationID"):
if doi.get("EIdType") == "doi":
res_ += f"{doi.text}\n"
if res_:
processed_result.append((res_, {"citation_data": citation_data}))
return processed_result
def get_code_params(code: str, preparam_text: str, postparam_text: str):
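    """Return the substring of `code` between `preparam_text` and `postparam_text`, or None if a marker is missing or the slice is empty."""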
l = len(preparam_text)
preparam_index = code.find(preparam_text)
postparam_index = code.find(postparam_text)
if preparam_index == -1 or postparam_index == -1:
return
params = code[preparam_index + l : postparam_index].strip()
if params == "":
return
return params
def validate_llm_response(goal, response):
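    """Ask a completion model whether `response` reasonably satisfies `goal`; returns True only for an exact 'yes'."""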
validation_prompt = f"I gave an LLM this goal: '{goal}' and it gave this response: '{response}'. Is this reasonable, or did something go wrong? [yes|no]"
validation_response = (
openai.Completion.create(
engine="text-davinci-003", prompt=validation_prompt, temperature=0.0
)
.choices[0]
.text.strip()
)
if validation_response.lower() == "yes":
return True
else:
return False
def generate_tool_prompt(task):
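    """Build the tool prompt for the API named in the task (MYVARIANT, MYGENE, or PUBMED), embedding that API's documentation."""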
if "MYVARIANT" in task:
api_name = "myvariant"
elif "MYGENE" in task:
api_name = "mygene"
elif "PUBMED" in task:
api_name = "PubMed"
else:
print(f"Error. Tool not found in task: {task}")
return None
api_info = api_info_mapping[api_name]
prompt = f"""You have access to query the {api_name} API. If a task starts with '{api_name.upper()}:' then you should create the code to query the {api_name} API based off the documentation and return the code to complete your task. If you use the {api_name} API, do not answer with words, simply write the parameters used to call the function then cease output. Be sure it is valid python that will execute in a python interpreter.
---
Here is the {api_name} documentation
{api_info}
---
You should change the parameters to fit your specific task.
""".strip()
return prompt
def get_ada_embedding(text):
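    """Embed text with text-embedding-ada-002, truncating inputs that exceed the embedding size limit."""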
ada_embedding_max_size = 8191
text = text.replace("\n", " ")
if num_tokens_from_string(text) > ada_embedding_max_size:
# There must be a better way to do this.
text = text[:ada_embedding_max_size]
return openai.Embedding.create(input=[text], model="text-embedding-ada-002")[
"data"
][0]["embedding"]
def insert_doc_llama_index(index, doc_id, data, metadata={}, embedding=None):
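    """Wrap data in a Document (embedding it if no embedding is given) and insert it into the index, excluding citation_data from LLM and embedding metadata."""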
if not embedding:
embedding = get_ada_embedding(data)
doc = Document(text=data, embedding=embedding, doc_id=doc_id, metadata=metadata)
doc.excluded_llm_metadata_keys = ["citation_data"]
doc.excluded_embed_metadata_keys = ["citation_data"]
index.insert(doc)
def handle_python_result(result, cache, task, doc_store, doc_store_task_key):
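    """Wrap generated API parameters in the matching wrapper call, execute the code, record the call in the cache, and post-process results for the task's tool."""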
results_returned = True
params = result
doc_store["tasks"][doc_store_task_key]["result_code"] = result
tool = task.split(":")[0]
if tool == "MYGENE":
result = (
"from api.mygene_wrapper import mygene_wrapper\n"
+ result
+ "\nret = mygene_wrapper(query_term, size, from_)"
)
elif tool == "MYVARIANT":
result = (
"from api.myvariant_wrapper import myvariant_wrapper\n"
+ result
+ "\nret = myvariant_wrapper(query_term)"
)
elif tool == "PUBMED":
result = (
"from api.pubmed_wrapper import pubmed_wrapper\n"
+ result
+ "\nret = pubmed_wrapper(query_term, retmax, retstart)"
)
executed_result = execute_python(result)
if type(executed_result) is list:
executed_result = list(filter(lambda x: x, executed_result))
if (executed_result is not None) and (
not executed_result
    ):  # Execution completed successfully, but the executed result was an empty list
results_returned = False
result = "NOTE: Code returned no results\n\n" + result
print(Fore.BLUE + f"\nTask '{task}' completed but returned no results")
if "MYVARIANT" in task:
if results_returned:
cache["MYVARIANT"].append(f"---\n{params}---\n")
else:
cache["MYVARIANT"].append(
f"---\nNote: This call returned no results\n{params}---\n"
)
processed_result = process_myvariant_result(executed_result)
if "MYGENE" in task:
if results_returned:
cache["MYGENE"].append(f"---\n{params}---\n")
else:
cache["MYGENE"].append(
f"---\nNote: This call returned no results\n{params}---\n"
)
processed_result = process_mygene_result(executed_result)
if "PUBMED" in task:
if results_returned:
cache["PUBMED"].append(f"---\n{params}---\n")
else:
cache["PUBMED"].append(
f"---\nNote: This call returned no results\n{params}---\n"
)
processed_result = process_pubmed_result(executed_result)
if executed_result is None:
result = "NOTE: Code did not run succesfully\n\n" + result
print(
Fore.BLUE + f"Task '{task}' failed. Code {result} did not run succesfully."
)
if "MYGENE" in task:
cache["MYGENE"].append(
f"---\nNote: This call did not run succesfully\n{params}---\n"
)
if "PUBMED" in task:
cache["PUBMED"].append(
f"---\nNote: This call did not run succesfully\n{params}---\n"
)
if "MYVARIANT" in task:
cache["MYVARIANT"].append(
f"---\nNote: This call did not run succesfully\n{params}---\n"
)
return
return processed_result
def handle_results(
result, index, doc_store, doc_store_key, task_id_counter, RESULT_CUTOFF
):
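    """Embed each (result, metadata) pair, insert it into the llama index, and append the output to doc_store; results are truncated to RESULT_CUTOFF characters."""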
for i, r in enumerate(result):
res, metadata = r[0], r[1]
res = str(res)[
:RESULT_CUTOFF
        ]  # Occasionally an enormous result will slow the program to a halt. Not ideal to lose results, but this cap is in place for now.
vectorized_data = get_ada_embedding(res)
task_id = f"doc_id_{task_id_counter}_{i}"
insert_doc_llama_index(
index=index,
doc_id=task_id,
data=res,
metadata=metadata,
embedding=vectorized_data,
)
doc_store["tasks"][doc_store_key]["results"].append(
{
"task_id_counter": task_id_counter,
"vectorized_data": vectorized_data,
"output": res,
"metadata": metadata,
}
)
def query_knowledge_base(
index,
query="Give a detailed but terse overview of all the information. Start with a high level summary and then go into details. Do not include any further instruction. Do not include filler words.",
response_mode="tree_summarize",
top_k=50,
list_index=False,
):
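    """Query the index with a tree_summarize retriever query engine and return (response text, joined citation data)."""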
if not index.docstore.docs:
print(Fore.RED + "NO INFORMATION IN LLAMA INDEX")
return
# configure retriever
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=top_k,
)
# configure response synthesizer
response_synthesizer = ResponseSynthesizer.from_args(
response_mode="tree_summarize",
)
# assemble query engine
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
)
if list_index:
query_response = index.query(query, response_mode="default")
else:
# From llama index docs: Empirically, setting response_mode="tree_summarize" also leads to better summarization results.
query_response = query_engine.query(query)
extra_info = ""
if query_response.metadata:
try:
extra_info = [
x.get("citation_data") for x in query_response.metadata.values()
]
if not any(extra_info):
extra_info = []
except Exception as e:
print("Issue getting extra info from llama index")
return query_response.response, "\n\n".join(extra_info)
def create_index(
api_key,
summaries=[],
temperature=0.0,
model_name="gpt-3.5-turbo-16k",
max_tokens=6000,
):
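    """Build a GPTVectorStoreIndex over the summaries using a ChatOpenAI-backed service context."""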
llm_predictor = LLMPredictor(
llm=ChatOpenAI(
temperature=temperature,
openai_api_key=api_key,
model_name=model_name,
max_tokens=max_tokens,
)
)
documents = []
for i, summary in enumerate(summaries):
doc = Document(text=summary, doc_id=str(i))
doc.excluded_llm_metadata_keys = ["citation_data"]
doc.excluded_embed_metadata_keys = ["citation_data"]
documents.append(doc)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor, chunk_size=4000
)
return GPTVectorStoreIndex(documents, service_context=service_context)
def create_graph_index(
api_key,
indicies=[],
summaries=[],
temperature=0.0,
model_name="text-davinci-003",
max_tokens=2000,
):
llm_predictor = LLMPredictor(
llm=OpenAI(
temperature=temperature,
openai_api_key=api_key,
model_name=model_name,
max_tokens=max_tokens,
)
)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
graph = ComposableGraph.from_indices(
GPTListIndex,
indicies,
index_summaries=summaries,
service_context=service_context,
)
return graph
def create_list_index(
api_key,
summaries=[],
temperature=0.0,
model_name="text-davinci-003",
max_tokens=2000,
):
llm_predictor = LLMPredictor(
llm=OpenAI(
temperature=temperature,
openai_api_key=api_key,
model_name=model_name,
max_tokens=max_tokens,
)
)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
documents = []
for i, summary in enumerate(summaries):
documents.append(Document(text=summary, doc_id=str(i)))
index = GPTListIndex.from_documents(documents, service_context=service_context)
return index
@backoff.on_exception(
partial(backoff.expo, max_value=50),
(
openai.error.RateLimitError,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.ServiceUnavailableError,
openai.error.Timeout,
),
)
def get_gpt_completion(
prompt,
temp=0.0,
engine="text-davinci-003",
top_p=1,
frequency_penalty=0,
presence_penalty=0,
):
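    """Call the OpenAI completion endpoint with exponential backoff, sizing max_tokens to the space left after the prompt."""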
response = openai.Completion.create(
engine=engine,
prompt=prompt,
temperature=temp,
max_tokens=get_max_completion_len(prompt),
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
)
return response.choices[0].text.strip()
@backoff.on_exception(
partial(backoff.expo, max_value=50),
(
openai.error.RateLimitError,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.ServiceUnavailableError,
openai.error.Timeout,
),
)
def get_gpt_chat_completion(
system_prompt, user_prompt, model="gpt-3.5-turbo-16k", temp=0.0
):
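    """Call the OpenAI chat completion endpoint with exponential backoff and return the stripped message content."""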
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
],
temperature=temp,
)
return response.choices[0]["message"]["content"].strip()
### FILE UTILS ###
def make_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def write_file(path, contents, mode="w"):
with open(path, mode) as f:
f.write(contents)
def read_file(path, mode="r"):
with open(path, mode) as f:
contents = f.read()
if not contents:
print(f"WARNING: file {path} empty")
return contents
def sanitize_dir_name(dir_name):
# Remove invalid characters
dir_name = re.sub(r'[<>:"/\|?*]', "_", dir_name)
dir_name = dir_name.replace(" ", "_")
# Remove leading period
if dir_name.startswith("."):
dir_name = dir_name[1:]
return dir_name
def save(
index,
doc_store,
OBJECTIVE,
current_datetime,
task_id_counter,
task_list,
completed_tasks,
cache,
reload_count,
summaries,
):
# Make basepath.
path = os.path.join("./out", sanitize_dir_name(OBJECTIVE) + "_" + current_datetime)
make_dir(path)
# Save llama index.
index.storage_context.persist(persist_dir=os.path.join(path, "index.json"))
# Save program state.
state = {
"summaries": summaries,
"reload_count": reload_count,
"task_id_counter": task_id_counter,
"task_list": list(task_list),
"completed_tasks": completed_tasks,
"cache": dict(cache),
"current_datetime": current_datetime,
"objective": OBJECTIVE,
}
with open(os.path.join(path, "state.json"), "w") as outfile:
json.dump(state, outfile)
# Save results.
if "key_results" in doc_store:
if reload_count:
new_time = str(time.strftime("%Y-%m-%d_%H-%M-%S"))
header = f"# {OBJECTIVE}\nDate: {new_time}\n\n"
else:
header = f"# {OBJECTIVE}\nDate: {current_datetime}\n\n"
key_findings_path = os.path.join(path, f"key_findings_{reload_count}.md")
write_file(key_findings_path, header, mode="a+")
for res in doc_store["key_results"]:
content = f"{res[0]}{res[1]}"
write_file(key_findings_path, content, mode="a+")
for task, doc in doc_store["tasks"].items():
doc_path = os.path.join(path, task)
make_dir(doc_path)
result_path = os.path.join(doc_path, "results")
make_dir(result_path)
if "executive_summary" in doc:
write_file(
os.path.join(result_path, "executive_summary.txt"),
doc["executive_summary"],
)
if "result_code" in doc:
write_file(os.path.join(result_path, "api_call.txt"), doc["result_code"])
for i, result in enumerate(doc["results"]):
result_path_i = os.path.join(result_path, str(i))
make_dir(result_path_i)
write_file(os.path.join(result_path_i, "output.txt"), result["output"])
write_file(
os.path.join(result_path_i, "vector.txt"),
str(result["vectorized_data"]),
)
def load(path):
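    """Reload a persisted llama index and saved program state (tasks, cache, counters, summaries) from `path`."""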
llm_predictor = LLMPredictor(
llm=ChatOpenAI(
temperature=0,
openai_api_key=api_key,
model_name="gpt-3.5-turbo-16k",
max_tokens=6000,
)
)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor, chunk_size=4000
)
# rebuild storage context
storage_context = StorageContext.from_defaults(
persist_dir=os.path.join(path, "index.json")
)
index = load_index_from_storage(
storage_context=storage_context, service_context=service_context
)
state_path = os.path.join(path, "state.json")
if os.path.exists(state_path):
with open(state_path, "r") as f:
json_data = json.load(f)
try:
reload_count = json_data["reload_count"] + 1
task_id_counter = json_data["task_id_counter"]
task_list = json_data["task_list"]
completed_tasks = json_data["completed_tasks"]
cache = defaultdict(list, json_data["cache"])
current_datetime = json_data["current_datetime"]
objective = json_data["objective"]
summaries = json_data["summaries"]
except KeyError as e:
raise Exception(
f"Missing key '{e.args[0]}' in JSON file at path '{state_path}'"
)
return (
index,
task_id_counter,
deque(task_list),
completed_tasks,
cache,
current_datetime,
objective,
reload_count,
summaries,
)
| [
"llama_index.ResponseSynthesizer.from_args",
"llama_index.indices.composability.ComposableGraph.from_indices",
"llama_index.query_engine.RetrieverQueryEngine",
"llama_index.ServiceContext.from_defaults",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.GPTVectorStoreIndex",
"llama_index.GPTListIndex.from_documents",
"llama_index.load_index_from_storage",
"llama_index.Document"
] | [((3048, 3084), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (3069, 3084), False, 'import tiktoken\n'), ((14814, 14888), 'llama_index.Document', 'Document', ([], {'text': 'data', 'embedding': 'embedding', 'doc_id': 'doc_id', 'metadata': 'metadata'}), '(text=data, embedding=embedding, doc_id=doc_id, metadata=metadata)\n', (14822, 14888), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((19422, 19479), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': 'top_k'}), '(index=index, similarity_top_k=top_k)\n', (19442, 19479), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((19568, 19629), 'llama_index.ResponseSynthesizer.from_args', 'ResponseSynthesizer.from_args', ([], {'response_mode': '"""tree_summarize"""'}), "(response_mode='tree_summarize')\n", (19597, 19629), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((19693, 19782), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer)\n', (19713, 19782), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((21128, 21202), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(4000)'}), '(llm_predictor=llm_predictor, chunk_size=4000)\n', (21156, 21202), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((21228, 21291), 'llama_index.GPTVectorStoreIndex', 'GPTVectorStoreIndex', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (21247, 21291), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((21681, 21738), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (21709, 21738), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((21752, 21869), 'llama_index.indices.composability.ComposableGraph.from_indices', 'ComposableGraph.from_indices', (['GPTListIndex', 'indicies'], {'index_summaries': 'summaries', 'service_context': 'service_context'}), '(GPTListIndex, indicies, index_summaries=\n summaries, service_context=service_context)\n', (21780, 21869), False, 'from llama_index.indices.composability import ComposableGraph\n'), ((22293, 22350), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (22321, 22350), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((22491, 
22562), 'llama_index.GPTListIndex.from_documents', 'GPTListIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (22518, 22562), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((22609, 22644), 'functools.partial', 'partial', (['backoff.expo'], {'max_value': '(50)'}), '(backoff.expo, max_value=50)\n', (22616, 22644), False, 'from functools import partial\n'), ((23696, 23862), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': 'model', 'messages': "[{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content':\n user_prompt}]", 'temperature': 'temp'}), "(model=model, messages=[{'role': 'system',\n 'content': system_prompt}, {'role': 'user', 'content': user_prompt}],\n temperature=temp)\n", (23724, 23862), False, 'import openai\n'), ((23344, 23379), 'functools.partial', 'partial', (['backoff.expo'], {'max_value': '(50)'}), '(backoff.expo, max_value=50)\n', (23351, 23379), False, 'from functools import partial\n'), ((24449, 24486), 're.sub', 're.sub', (['"""[<>:"/\\\\|?*]"""', '"""_"""', 'dir_name'], {}), '(\'[<>:"/\\\\|?*]\', \'_\', dir_name)\n', (24455, 24486), False, 'import re\n'), ((27227, 27301), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(4000)'}), '(llm_predictor=llm_predictor, chunk_size=4000)\n', (27255, 27301), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((27471, 27565), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(storage_context=storage_context, service_context=\n service_context)\n', (27494, 27565), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((27592, 27624), 'os.path.join', 'os.path.join', (['path', '"""state.json"""'], {}), "(path, 'state.json')\n", (27604, 27624), False, 'import os\n'), ((27632, 27658), 'os.path.exists', 'os.path.exists', (['state_path'], {}), '(state_path)\n', (27646, 27658), False, 'import os\n'), ((899, 931), 'logging.getLogger', 'logging.getLogger', (['"""llama_index"""'], {}), "('llama_index')\n", (916, 931), False, 'import logging\n'), ((2879, 2895), 'collections.deque', 'deque', (['task_list'], {}), '(task_list)\n', (2884, 2895), False, 'from collections import defaultdict, deque\n'), ((10649, 10670), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['result'], {}), '(result)\n', (10662, 10670), True, 'import xml.etree.ElementTree as ET\n'), ((24036, 24056), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (24050, 24056), False, 'import os\n'), ((24066, 24083), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (24077, 24083), False, 'import os\n'), ((25481, 25506), 'json.dump', 'json.dump', (['state', 'outfile'], {}), '(state, outfile)\n', (25490, 25506), False, 'import json\n'), ((25821, 25874), 'os.path.join', 'os.path.join', (['path', 'f"""key_findings_{reload_count}.md"""'], {}), "(path, f'key_findings_{reload_count}.md')\n", (25833, 25874), False, 'import os\n'), ((26151, 26175), 
'os.path.join', 'os.path.join', (['path', 'task'], {}), '(path, task)\n', (26163, 26175), False, 'import os\n'), ((26225, 26258), 'os.path.join', 'os.path.join', (['doc_path', '"""results"""'], {}), "(doc_path, 'results')\n", (26237, 26258), False, 'import os\n'), ((28455, 28471), 'collections.deque', 'deque', (['task_list'], {}), '(task_list)\n', (28460, 28471), False, 'from collections import defaultdict, deque\n'), ((4448, 4470), 'markdown.markdown', 'markdown.markdown', (['res'], {}), '(res)\n', (4465, 4470), False, 'import markdown\n'), ((4498, 4530), 'markdown.markdown', 'markdown.markdown', (['citation_data'], {}), '(citation_data)\n', (4515, 4530), False, 'import markdown\n'), ((20669, 20779), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'temperature', 'openai_api_key': 'api_key', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, openai_api_key=api_key, model_name=\n model_name, max_tokens=max_tokens)\n', (20679, 20779), False, 'from langchain.chat_models import ChatOpenAI\n'), ((21492, 21598), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': 'temperature', 'openai_api_key': 'api_key', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, openai_api_key=api_key, model_name=\n model_name, max_tokens=max_tokens)\n', (21498, 21598), False, 'from langchain import OpenAI\n'), ((22104, 22210), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': 'temperature', 'openai_api_key': 'api_key', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, openai_api_key=api_key, model_name=\n model_name, max_tokens=max_tokens)\n', (22110, 22210), False, 'from langchain import OpenAI\n'), ((25023, 25055), 'os.path.join', 'os.path.join', (['path', '"""index.json"""'], {}), "(path, 'index.json')\n", (25035, 25055), False, 'import os\n'), ((25422, 25454), 'os.path.join', 'os.path.join', (['path', '"""state.json"""'], {}), "(path, 'state.json')\n", (25434, 25454), False, 'import os\n'), ((27041, 27144), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'openai_api_key': 'api_key', 'model_name': '"""gpt-3.5-turbo-16k"""', 'max_tokens': '(6000)'}), "(temperature=0, openai_api_key=api_key, model_name=\n 'gpt-3.5-turbo-16k', max_tokens=6000)\n", (27051, 27144), False, 'from langchain.chat_models import ChatOpenAI\n'), ((27419, 27451), 'os.path.join', 'os.path.join', (['path', '"""index.json"""'], {}), "(path, 'index.json')\n", (27431, 27451), False, 'import os\n'), ((27725, 27737), 'json.load', 'json.load', (['f'], {}), '(f)\n', (27734, 27737), False, 'import json\n'), ((14550, 14619), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': '[text]', 'model': '"""text-embedding-ada-002"""'}), "(input=[text], model='text-embedding-ada-002')\n", (14573, 14619), False, 'import openai\n'), ((25615, 25649), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%H-%M-%S"""'], {}), "('%Y-%m-%d_%H-%M-%S')\n", (25628, 25649), False, 'import time\n'), ((26369, 26419), 'os.path.join', 'os.path.join', (['result_path', '"""executive_summary.txt"""'], {}), "(result_path, 'executive_summary.txt')\n", (26381, 26419), False, 'import os\n'), ((26533, 26574), 'os.path.join', 'os.path.join', (['result_path', '"""api_call.txt"""'], {}), "(result_path, 'api_call.txt')\n", (26545, 26574), False, 'import os\n'), ((26771, 26812), 'os.path.join', 'os.path.join', (['result_path_i', '"""output.txt"""'], {}), "(result_path_i, 'output.txt')\n", (26783, 26812), False, 'import os\n'), 
((26872, 26913), 'os.path.join', 'os.path.join', (['result_path_i', '"""vector.txt"""'], {}), "(result_path_i, 'vector.txt')\n", (26884, 26913), False, 'import os\n'), ((28018, 28055), 'collections.defaultdict', 'defaultdict', (['list', "json_data['cache']"], {}), "(list, json_data['cache'])\n", (28029, 28055), False, 'from collections import defaultdict, deque\n'), ((13093, 13192), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-003"""', 'prompt': 'validation_prompt', 'temperature': '(0.0)'}), "(engine='text-davinci-003', prompt=\n validation_prompt, temperature=0.0)\n", (13117, 13192), False, 'import openai\n')] |
import streamlit as st
import requests
import base64
import os
import llama_index
from audio_recorder_streamlit import audio_recorder
from openai import OpenAI
from llama_index import VectorStoreIndex, SimpleDirectoryReader
from scipy.io.wavfile import write
os.environ['OPENAI_API_KEY'] = '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'
API_KEY = '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'
def RAG(text):
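    # Build a vector index over the documents in "db3" and answer the query against it.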
documents = SimpleDirectoryReader("db3").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query(text)
# Extract the text from the response
response_text = response.response if hasattr(response, 'response') else str(response)
return response_text
def linkRAGhindi(text):
new_prompt="निम्नलिखित प्रश्न के लिए सबसे उपयुक्त वेबसाइट लिंक दें"+text
documents = SimpleDirectoryReader("db1").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query(new_prompt)
# Extract the text from the response
response_text = response.response if hasattr(response, 'response') else str(response)
return response_text
def rechindi(text):
new_prompt="निम्नलिखित प्रश्न के लिए सबसे उपयुक्त वेबसाइट लिंक दें"+text
documents = SimpleDirectoryReader("db2").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query(new_prompt)
# Extract the text from the response
response_text = response.response if hasattr(response, 'response') else str(response)
return response_text
def linkRAGenglish(text):
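    # Retrieve the most relevant website link for the question from the "db1" index.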
new_prompt="Give the most appropiate website link for the following question "+text
documents = SimpleDirectoryReader("db1").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query(new_prompt)
# Extract the text from the response
response_text = response.response if hasattr(response, 'response') else str(response)
return response_text
def recenglish(text):
new_prompt="Give the most intresting other website link for the following question "+text
documents = SimpleDirectoryReader("db2").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query(new_prompt)
# Extract the text from the response
response_text = response.response if hasattr(response, 'response') else str(response)
return response_text
def transcribe_text_to_voice_english(audio_location):
client = OpenAI(api_key=API_KEY)
audio_file = open(audio_location, "rb")
transcript = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
return transcript.text
def transcribe_text_to_voice_hindi(audio_location):
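    # Send the base64-encoded audio to the RunPod faster-whisper endpoint and return (transcription, translation).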
url = "https://api.runpod.ai/v2/faster-whisper/runsync"
with open(audio_location, "rb") as audio_file:
audio_base64 = base64.b64encode(audio_file.read()).decode('utf-8')
payload = {
"input": {
"audio_base64": audio_base64,
"model": "small",
"transcription": "plain_text",
"translate": True,
"language": "hi",
"temperature": 0,
"best_of": 5,
"beam_size": 5,
"patience": 1,
"suppress_tokens": "-1",
"condition_on_previous_text": False,
"temperature_increment_on_fallback": 0.2,
"compression_ratio_threshold": 2.4,
"logprob_threshold": -1,
"no_speech_threshold": 0.6,
"word_timestamps": False
},
"enable_vad": False
}
headers = {
"accept": "application/json",
"content-type": "application/json",
"authorization": "X01PG949AHTOVRYHLQZKSRIWN82UHBUU5JYLNAHM"
}
response = requests.post(url, json=payload, headers=headers)
response_json = response.json()
transcription = response_json["output"]["transcription"]
translation = response_json["output"]["translation"].strip().split('\n')[-1].strip()
return transcription, translation
def recommendation(text):
client = OpenAI(api_key=API_KEY)
messages = [{"role": "user", "content": text}]
response = client.chat.completions.create(model="gpt-3.5-turbo-1106", messages=messages)
return response.choices[0].message.content
def text_to_speech_ai(speech_file_path, api_response):
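    # Synthesize the response with OpenAI TTS (model tts-1, voice "nova") and write it to speech_file_path.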
client = OpenAI(api_key=API_KEY)
response = client.audio.speech.create(model="tts-1",voice="nova",input=api_response)
response.stream_to_file(speech_file_path)
st.title("🚀 SHRESHTH 💬 Bhuvan Assistant")
# Radio wheel for language selection
language = st.radio("Language/भाषा",["English", "हिंदी"])
# Displaying description based on selected language
if language == "English":
mode = st.radio("Select Mode Of Input", ["Voice","Text"])
st.write("Smart - Helpful - Robust - Effortless - System for Text-to-speech and Human-like Assistance")
if mode == "Voice" or mode == "आवाज":
st.write("Click on the voice recorder and let me know how I can help you today with your Queries Regarding Bhuvan!")
audio_bytes = audio_recorder(
text="",
recording_color="#e8b62c",
neutral_color="#6aa36f",
icon_name="microphone",
icon_size="2x",
)
if audio_bytes:
# Save the Recorded File
audio_location = "audio_file.wav"
with open(audio_location, "wb") as f:
f.write(audio_bytes)
if language == "English":
text=transcribe_text_to_voice_english(audio_location)
st.write(text)
else:
text,trans=transcribe_text_to_voice_hindi(audio_location)
st.write(text)
link_response = linkRAGenglish(text)
st.write("SHRESHTH:", link_response)
api_response = RAG(text)
st.write("SHRESHTH:", api_response)
speech_file_path = 'audio_response.mp3'
text_to_speech_ai(speech_file_path, api_response)
st.audio(speech_file_path)
recctext="recommend top three other websites that could interest the user depending on this link and answer : " + link_response + api_response
recc=linkRAGenglish(recctext)
st.write("SHRESHTH:", recc)
else:
# Text input option
text_input = st.text_area("Enter your text here and press Enter", "")
if st.button("Submit"):
# Process the entered text
link_response = linkRAGenglish(text_input)
st.write("SHRESHTH:", link_response)
api_response = RAG(text_input)
st.write("SHRESHTH:", api_response)
# Read out the text response using tts
speech_file_path = 'audio_response.mp3'
text_to_speech_ai(speech_file_path, api_response)
st.audio(speech_file_path)
recctext="recommend top three other websites that could interest the user depending on this link and answer : " + link_response + api_response
recc=linkRAGenglish(recctext)
st.write("SHRESHTH:", recc)
else:
mode = st.radio("इनपुट मोड का चयन करें", ["आवाज", "टेक्स्ट"])
st.write("स्मार्ट - सहायक - मजबूत - प्रयासहीन - पाठ-से-बोल के लिए एक सिस्टम और मानव जैसी सहायता")
if mode == "Voice" or mode == "आवाज" or mode == "ভয়েস":
st.write("आवाज रेकॉर्डर पर क्लिक करें और मुझसे यह बताएं कि आज आपकी भुवन से संबंधित सवालों में मैं आपकी कैसे मदद कर सकता हूँ!")
audio_bytes = audio_recorder(
text="",
recording_color="#e8b62c",
neutral_color="#6aa36f",
icon_name="microphone",
icon_size="2x",
)
if audio_bytes:
# Save the Recorded File
audio_location = "audio_file.wav"
with open(audio_location, "wb") as f:
f.write(audio_bytes)
if language == "English":
text=transcribe_text_to_voice_english(audio_location)
st.write(text)
else:
text,trans=transcribe_text_to_voice_hindi(audio_location)
st.write(text)
link_response = linkRAGhindi(text)
st.write("श्रेष्ठ:", link_response)
api_response = RAG(text)
st.write("श्रेष्ठ:", api_response)
# Read out the text response using tts
speech_file_path = 'audio_response.mp3'
text_to_speech_ai(speech_file_path, api_response)
st.audio(speech_file_path)
recctext="recommend top three other websites that could interest the user depending on this link and answer : " + link_response + api_response
recc=rechindi(recctext)
st.write("श्रेष्ठ:", recc)
else:
# Text input option
text_input = st.text_area("आप यहाँ अपना टेक्स्ट दर्ज करें और एंटर दबाएं", "")
if st.button("एंटर"):
# Process the entered text
link_response = linkRAGhindi(text_input)
st.write("श्रेष्ठ:", link_response)
api_response = RAG(text_input)
st.write("श्रेष्ठ:", api_response)
# Read out the text response using tts
speech_file_path = 'audio_response.mp3'
text_to_speech_ai(speech_file_path, api_response)
st.audio(speech_file_path)
recctext="recommend top three other websites that could interest the user depending on this link and answer : " + link_response + api_response
recc=rechindi(recctext)
st.write("श्रेष्ठ:", recc)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader"
] | [((4762, 4803), 'streamlit.title', 'st.title', (['"""🚀 SHRESHTH 💬 Bhuvan Assistant"""'], {}), "('🚀 SHRESHTH 💬 Bhuvan Assistant')\n", (4770, 4803), True, 'import streamlit as st\n'), ((4853, 4900), 'streamlit.radio', 'st.radio', (['"""Language/भाषा"""', "['English', 'हिंदी']"], {}), "('Language/भाषा', ['English', 'हिंदी'])\n", (4861, 4900), True, 'import streamlit as st\n'), ((492, 534), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (523, 534), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((950, 992), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (981, 992), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((1410, 1452), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1441, 1452), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((1886, 1928), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1917, 1928), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((2359, 2401), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2390, 2401), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((2714, 2737), 'openai.OpenAI', 'OpenAI', ([], {'api_key': 'API_KEY'}), '(api_key=API_KEY)\n', (2720, 2737), False, 'from openai import OpenAI\n'), ((4000, 4049), 'requests.post', 'requests.post', (['url'], {'json': 'payload', 'headers': 'headers'}), '(url, json=payload, headers=headers)\n', (4013, 4049), False, 'import requests\n'), ((4315, 4338), 'openai.OpenAI', 'OpenAI', ([], {'api_key': 'API_KEY'}), '(api_key=API_KEY)\n', (4321, 4338), False, 'from openai import OpenAI\n'), ((4598, 4621), 'openai.OpenAI', 'OpenAI', ([], {'api_key': 'API_KEY'}), '(api_key=API_KEY)\n', (4604, 4621), False, 'from openai import OpenAI\n'), ((4989, 5040), 'streamlit.radio', 'st.radio', (['"""Select Mode Of Input"""', "['Voice', 'Text']"], {}), "('Select Mode Of Input', ['Voice', 'Text'])\n", (4997, 5040), True, 'import streamlit as st\n'), ((5044, 5157), 'streamlit.write', 'st.write', (['"""Smart - Helpful - Robust - Effortless - System for Text-to-speech and Human-like Assistance"""'], {}), "(\n 'Smart - Helpful - Robust - Effortless - System for Text-to-speech and Human-like Assistance'\n )\n", (5052, 5157), True, 'import streamlit as st\n'), ((7402, 7456), 'streamlit.radio', 'st.radio', (['"""इनपुट मोड का चयन करें"""', "['आवाज', 'टेक्स्ट']"], {}), "('इनपुट मोड का चयन करें', ['आवाज', 'टेक्स्ट'])\n", (7410, 7456), True, 'import streamlit as st\n'), ((7461, 7568), 'streamlit.write', 'st.write', (['"""स्मार्ट - सहायक - मजबूत - प्रयासहीन - पाठ-से-बोल के लिए एक सिस्टम और मानव जैसी सहायता"""'], {}), "(\n 'स्मार्ट - सहायक - मजबूत - प्रयासहीन - पाठ-से-बोल के लिए एक सिस्टम और मानव जैसी सहायता'\n )\n", (7469, 7568), True, 'import streamlit as st\n'), ((5198, 5324), 'streamlit.write', 'st.write', (['"""Click on the voice recorder and let me know how I can help you today with your Queries Regarding Bhuvan!"""'], {}), "(\n 'Click on the voice recorder and let me know how I can help you today with your Queries Regarding Bhuvan!'\n )\n", (5206, 5324), True, 'import streamlit as st\n'), ((5337, 5456), 'audio_recorder_streamlit.audio_recorder', 
'audio_recorder', ([], {'text': '""""""', 'recording_color': '"""#e8b62c"""', 'neutral_color': '"""#6aa36f"""', 'icon_name': '"""microphone"""', 'icon_size': '"""2x"""'}), "(text='', recording_color='#e8b62c', neutral_color='#6aa36f',\n icon_name='microphone', icon_size='2x')\n", (5351, 5456), False, 'from audio_recorder_streamlit import audio_recorder\n'), ((6619, 6675), 'streamlit.text_area', 'st.text_area', (['"""Enter your text here and press Enter"""', '""""""'], {}), "('Enter your text here and press Enter', '')\n", (6631, 6675), True, 'import streamlit as st\n'), ((6687, 6706), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (6696, 6706), True, 'import streamlit as st\n'), ((7629, 7765), 'streamlit.write', 'st.write', (['"""आवाज रेकॉर्डर पर क्लिक करें और मुझसे यह बताएं कि आज आपकी भुवन से संबंधित सवालों में मैं आपकी कैसे मदद कर सकता हूँ!"""'], {}), "(\n 'आवाज रेकॉर्डर पर क्लिक करें और मुझसे यह बताएं कि आज आपकी भुवन से संबंधित सवालों में मैं आपकी कैसे मदद कर सकता हूँ!'\n )\n", (7637, 7765), True, 'import streamlit as st\n'), ((7778, 7897), 'audio_recorder_streamlit.audio_recorder', 'audio_recorder', ([], {'text': '""""""', 'recording_color': '"""#e8b62c"""', 'neutral_color': '"""#6aa36f"""', 'icon_name': '"""microphone"""', 'icon_size': '"""2x"""'}), "(text='', recording_color='#e8b62c', neutral_color='#6aa36f',\n icon_name='microphone', icon_size='2x')\n", (7792, 7897), False, 'from audio_recorder_streamlit import audio_recorder\n'), ((9118, 9182), 'streamlit.text_area', 'st.text_area', (['"""आप यहाँ अपना टेक्स्ट दर्ज करें और एंटर दबाएं"""', '""""""'], {}), "('आप यहाँ अपना टेक्स्ट दर्ज करें और एंटर दबाएं', '')\n", (9130, 9182), True, 'import streamlit as st\n'), ((9194, 9211), 'streamlit.button', 'st.button', (['"""एंटर"""'], {}), "('एंटर')\n", (9203, 9211), True, 'import streamlit as st\n'), ((439, 467), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""db3"""'], {}), "('db3')\n", (460, 467), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((897, 925), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""db1"""'], {}), "('db1')\n", (918, 925), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((1357, 1385), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""db2"""'], {}), "('db2')\n", (1378, 1385), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((1833, 1861), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""db1"""'], {}), "('db1')\n", (1854, 1861), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((2306, 2334), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""db2"""'], {}), "('db2')\n", (2327, 2334), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((6045, 6081), 'streamlit.write', 'st.write', (['"""SHRESHTH:"""', 'link_response'], {}), "('SHRESHTH:', link_response)\n", (6053, 6081), True, 'import streamlit as st\n'), ((6131, 6166), 'streamlit.write', 'st.write', (['"""SHRESHTH:"""', 'api_response'], {}), "('SHRESHTH:', api_response)\n", (6139, 6166), True, 'import streamlit as st\n'), ((6293, 6319), 'streamlit.audio', 'st.audio', (['speech_file_path'], {}), '(speech_file_path)\n', (6301, 6319), True, 'import streamlit as st\n'), ((6531, 6558), 'streamlit.write', 'st.write', (['"""SHRESHTH:"""', 'recc'], {}), "('SHRESHTH:', recc)\n", (6539, 6558), True, 'import streamlit as st\n'), ((6814, 6850), 'streamlit.write', 'st.write', 
(['"""SHRESHTH:"""', 'link_response'], {}), "('SHRESHTH:', link_response)\n", (6822, 6850), True, 'import streamlit as st\n'), ((6906, 6941), 'streamlit.write', 'st.write', (['"""SHRESHTH:"""', 'api_response'], {}), "('SHRESHTH:', api_response)\n", (6914, 6941), True, 'import streamlit as st\n'), ((7119, 7145), 'streamlit.audio', 'st.audio', (['speech_file_path'], {}), '(speech_file_path)\n', (7127, 7145), True, 'import streamlit as st\n'), ((7357, 7384), 'streamlit.write', 'st.write', (['"""SHRESHTH:"""', 'recc'], {}), "('SHRESHTH:', recc)\n", (7365, 7384), True, 'import streamlit as st\n'), ((8483, 8518), 'streamlit.write', 'st.write', (['"""श्रेष्ठ:"""', 'link_response'], {}), "('श्रेष्ठ:', link_response)\n", (8491, 8518), True, 'import streamlit as st\n'), ((8568, 8602), 'streamlit.write', 'st.write', (['"""श्रेष्ठ:"""', 'api_response'], {}), "('श्रेष्ठ:', api_response)\n", (8576, 8602), True, 'import streamlit as st\n'), ((8786, 8812), 'streamlit.audio', 'st.audio', (['speech_file_path'], {}), '(speech_file_path)\n', (8794, 8812), True, 'import streamlit as st\n'), ((9018, 9044), 'streamlit.write', 'st.write', (['"""श्रेष्ठ:"""', 'recc'], {}), "('श्रेष्ठ:', recc)\n", (9026, 9044), True, 'import streamlit as st\n'), ((9317, 9352), 'streamlit.write', 'st.write', (['"""श्रेष्ठ:"""', 'link_response'], {}), "('श्रेष्ठ:', link_response)\n", (9325, 9352), True, 'import streamlit as st\n'), ((9408, 9442), 'streamlit.write', 'st.write', (['"""श्रेष्ठ:"""', 'api_response'], {}), "('श्रेष्ठ:', api_response)\n", (9416, 9442), True, 'import streamlit as st\n'), ((9625, 9651), 'streamlit.audio', 'st.audio', (['speech_file_path'], {}), '(speech_file_path)\n', (9633, 9651), True, 'import streamlit as st\n'), ((9857, 9883), 'streamlit.write', 'st.write', (['"""श्रेष्ठ:"""', 'recc'], {}), "('श्रेष्ठ:', recc)\n", (9865, 9883), True, 'import streamlit as st\n'), ((5846, 5860), 'streamlit.write', 'st.write', (['text'], {}), '(text)\n', (5854, 5860), True, 'import streamlit as st\n'), ((5967, 5981), 'streamlit.write', 'st.write', (['text'], {}), '(text)\n', (5975, 5981), True, 'import streamlit as st\n'), ((8283, 8297), 'streamlit.write', 'st.write', (['text'], {}), '(text)\n', (8291, 8297), True, 'import streamlit as st\n'), ((8404, 8418), 'streamlit.write', 'st.write', (['text'], {}), '(text)\n', (8412, 8418), True, 'import streamlit as st\n')] |
import json
import logging
import os
import re
import time
import xml.etree.ElementTree as ET
from collections import defaultdict, deque
from functools import partial
import backoff
import llama_index
import markdown
import openai
import tiktoken
from colorama import Fore
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
from llama_index import Document, GPTVectorStoreIndex, LLMPredictor, ServiceContext, GPTListIndex
from llama_index.indices.composability import ComposableGraph
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine
from llama_index import StorageContext, load_index_from_storage, ServiceContext
from llama_index import (
VectorStoreIndex,
ResponseSynthesizer,
)
from api.mygene_api import mygene_api
from api.pubmed_api import pubmed_api
from api.myvariant_api import myvariant_api
from config import OPENAI_API_KEY
logging.getLogger("llama_index").setLevel(logging.WARNING)
#file_handler = logging.FileHandler('utils.log')
# Configure the logging settings
#logging.basicConfig(level=logging.INFO, handlers=[file_handler])
MAX_TOKENS = 4097
api_info_mapping = {"mygene": mygene_api, "PubMed": pubmed_api, "myvariant": myvariant_api}
api_key = OPENAI_API_KEY or os.environ["OPENAI_API_KEY"]
openai.api_key = api_key
def get_input(prompt, type_=None, min_=None, max_=None, range_=None):
if min_ is not None and max_ is not None and max_ < min_:
raise ValueError("min_ must be less than or equal to max_.")
while True:
ui = input(prompt)
if type_ is not None:
try:
ui = type_(ui)
except ValueError:
print(f"Input type must be {type_.__name__}!")
continue
if max_ is not None and ui > max_:
print(f"Input must be less than or equal to {max_}.")
elif min_ is not None and ui < min_:
print(f"Input must be greater than or equal to {min_}.")
elif range_ is not None and ui not in range_:
if isinstance(range_, range):
template = "Input must be between {} and {}."
print(template.format(range_.start, range_.stop))
else:
template = "Input must be {}."
print(template.format(", ".join(map(str, range_))))
else:
return ui
def select_task(task_list):
# Task list is actually a Queue
task_list = list(task_list)
print('\n\n')
choice = get_input(Fore.LIGHTGREEN_EX + "\033[1mWhich task would you like to execute? Type 0 to create your own task! \033[0m", type_=int, min_=0, max_=len(task_list))
if choice == 0:
task = input(Fore.LIGHTGREEN_EX + "\033[1mWrite your task! \033[0m")
else:
task = task_list.pop(choice - 1)
return task, deque(task_list)
def num_tokens_from_string(string: str, encoding_name: str = "gpt2") -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
def get_key_results(index, objective, top_k=20, additional_queries=[]):
"""Run final queries over retrieved documents and store in doc_store."""
if not index.docstore.docs:
print(Fore.RED + "\033[1m\n! WARNING: NO TASKS RETURNED RESULTS. PLEASE TWEAK YOUR OBJECTIVE AND CHECK SPELLING. !\n\033[0m")
return []
print(Fore.CYAN + "\033[1m\n*****COMPILING KEY RESULTS*****\n\033[0m")
key_results = []
queries = [
"Give a brief high level summary of all the data.",
"Briefly list all the main points that the data covers.",
"Generate several creative hypotheses given the data.",
"What are some high level research directions to explore further given the data?",
f"Do your best to answer the objective: {objective} given the information.",
]
for query in queries:
print(Fore.CYAN + f"\nCOMPILING RESULT {query}\n")
res = None
try:
res, citation_data = query_knowledge_base(index=index, query=query, list_index=False, top_k=top_k)
except Exception as e:
print(f"Exception getting key result {query}, error {e}")
if res:
query = f"## {query}\n\n"
res_html = markdown.markdown(res)
res_citation = markdown.markdown(citation_data)
key_results.append((query, f"{res_html}\n\n### Citations\n\n{res_citation}\n\n"))
print(Fore.CYAN + f"\nRESULTS COMPILED. SAVED TO DIRECTORY `out`\n")
return key_results
def get_max_completion_len(prompt):
tokens = num_tokens_from_string(prompt)
return MAX_TOKENS - tokens
def execute_python(code: str):
# ret is defined in the code string
loc = {}
try:
exec(code, globals(), loc)
except Exception as e:
print(f"Exception executing code {code}, {e}")
return
return loc["ret"]
def process_myvariant_result(results):
processed_result = []
if not isinstance(results, list):
results = [results]
for result in results:
variant_name = result.get("_id")
gene_affected = result.get("cadd", {}).get("gene", {}).get("genename")
consequence = result.get("cadd", {}).get("consequence")
cadd_score = result.get("cadd", {}).get("phred")
rsid = result.get("dbsnp", {}).get("rsid")
variant_data = ""
citation_data = ""
if variant_name:
variant_data += f"Variant Name: {variant_name}\n"
if gene_affected:
variant_data += f"Gene Affected: {gene_affected}\n"
if consequence:
variant_data += f"Consequence: {consequence}\n"
if cadd_score is not None:
variant_data += f"CADD Score: {cadd_score}\n"
if rsid:
variant_data += f"rsID: {rsid}\n"
processed_result.append((variant_data,{"citation_data": citation_data}))
return processed_result
def process_mygene_result(results):
processed_result = []
if not isinstance(results, list):
results = [results]
# Each result will be split into 2 documents: summary and pathway
for json_data in results:
name = json_data.get("name")
refseq_genomic = json_data.get("refseq", {}).get("genomic", [])
refseq_rna = json_data.get("refseq", {}).get("rna", [])
symbol = json_data.get("symbol")
taxid = json_data.get("taxid")
type_of_gene = json_data.get("type_of_gene")
pos = json_data.get("genomic_pos_hg19")
summary = json_data.get("summary")
generif = json_data.get("generif")
output_summary = ""
citation_data = ""
# Summary
if name:
output_summary += f"Gene Name: {name}\n"
if refseq_genomic:
output_summary += f"RefSeq genomic: {', '.join(refseq_genomic)}\n"
if refseq_rna:
output_summary += f"RefSeq rna: {', '.join(refseq_rna)}\n"
if symbol:
output_summary += f"Symbol: {symbol}\n"
if taxid:
output_summary += f"Tax ID: {taxid}\n"
if type_of_gene and type_of_gene != 'unknown':
output_summary += f"Type of gene: {type_of_gene}\n"
if pos:
output_summary += f"Position: {pos}\n"
if summary:
output_summary += f"Summary of {name}: {summary}\n"
else:
            # If there is no summary, fall back to the generifs.
if generif:
# Take 20 rifs max. Some genes have hundreds of rifs and the results size explodes.
for rif in generif[:20]:
pubmed = rif.get("pubmed")
text = rif.get("text")
if text:
output_summary += text
if pubmed:
citation_data += f" Pubmed ID: {pubmed}"
output_summary = output_summary.strip()
#logging.info(f"Mygene Summary result {name}, length is {str(len(output_summary))}")
if output_summary:
processed_result.append((output_summary, {"citation_data": citation_data}))
# Pathway
pathway = json_data.get("pathway")
if pathway:
kegg = pathway.get("kegg", [])
pid = pathway.get("pid", [])
reactome = pathway.get("reactome", [])
wikipathways = pathway.get("wikipathways", [])
netpath = pathway.get("netpath", [])
biocarta = pathway.get("biocarta", [])
pathway_elements = {"kegg": kegg, "pid": pid, "reactome": reactome, "wikipathways": wikipathways, "netpath": netpath, "biocarta": biocarta}
# mygene returns dicts instead of lists if singleton
# Wrap with list if not list
for k,v in pathway_elements.items():
if type(v) is not list:
pathway_elements[k] = [v]
output_pathway = ""
citation_data = ""
if name:
output_pathway += f"Gene Name: {name}\n"
if symbol:
output_pathway += f"Symbol: {symbol}\n"
if taxid:
output_pathway += f"Tax ID: {taxid}\n"
if type_of_gene and type_of_gene != 'unknown':
output_pathway += f"Type of gene: {type_of_gene}\n"
if refseq_genomic:
output_pathway += f"RefSeq genomic: {', '.join(refseq_genomic)}\n"
if refseq_rna:
output_pathway += f"RefSeq rna: {', '.join(refseq_rna)}\n"
if pos:
output_pathway += f"Position: {pos}\n"
output_pathway += f"PATHWAYS\n\n"
for k,v in pathway_elements.items():
output_pathway += f"\n{k}:\n"
for item in v:
output_pathway += f" ID: {item.get('id', '')}"
output_pathway += f" Name: {item.get('name', '')}"
#logging.info(f"Mygene Pathway result {name}, length is {len(output_pathway)}")
output_pathway = output_pathway.strip()
if output_pathway:
processed_result.append((output_pathway,{"citation_data": citation_data}))
return processed_result
def process_pubmed_result(result):
try:
root = ET.fromstring(result)
except Exception as e:
print(f"Cannot parse pubmed result, expected xml. {e}")
print("Adding whole document. Note this will lead to suboptimal results.")
return result if isinstance(result, list) else [result]
processed_result = []
for article in root:
res_ = ""
citation_data = ""
for title in article.iter("Title"):
res_ += f"{title.text}\n"
citation_data += f"{title.text}\n"
for abstract in article.iter("AbstractText"):
res_ += f"{abstract.text}\n"
for author in article.iter("Author"):
try:
citation_data += f"{author.find('LastName').text}"
citation_data += f", {author.find('ForeName').text}\n"
except:
pass
for journal in article.iter("Journal"):
res_ += f"{journal.find('Title').text}\n"
citation_data += f"{journal.find('Title').text}\n"
for volume in article.iter("Volume"):
citation_data += f"{volume.text}\n"
for issue in article.iter("Issue"):
citation_data += f"{issue.text}\n"
for pubdate in article.iter("PubDate"):
try:
year = pubdate.find("Year").text
citation_data += f"{year}"
month = pubdate.find("Month").text
citation_data += f"-{month}"
day = pubdate.find("Day").text
citation_data += f"-{day}\n"
except:
pass
for doi in article.iter("ELocationID"):
if doi.get("EIdType") == "doi":
res_ += f"{doi.text}\n"
if res_:
processed_result.append((res_,{"citation_data": citation_data}))
return processed_result
def get_code_params(code: str, preparam_text: str, postparam_text: str):
l = len(preparam_text)
preparam_index = code.find(preparam_text)
postparam_index = code.find(postparam_text)
if preparam_index == -1 or postparam_index == -1:
return
params = code[preparam_index + l : postparam_index].strip()
if params == "":
return
return params
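# Illustrative sketch (hypothetical helper and marker strings, not in the original
# module): get_code_params returns the stripped text found between the two markers.
def _example_get_code_params():
    code = "# PARAMS START\nquery_term = 'BRCA1'\nretmax = 5\n# PARAMS END\n"
    return get_code_params(code, "# PARAMS START\n", "# PARAMS END")  # "query_term = 'BRCA1'\nretmax = 5"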
def validate_llm_response(goal, response):
validation_prompt = f"I gave an LLM this goal: '{goal}' and it gave this response: '{response}'. Is this reasonable, or did something go wrong? [yes|no]"
validation_response = (
openai.Completion.create(
engine="text-davinci-003", prompt=validation_prompt, temperature=0.0
)
.choices[0]
.text.strip()
)
if validation_response.lower() == "yes":
return True
else:
return False
def generate_tool_prompt(task):
if "MYVARIANT" in task:
api_name = "myvariant"
elif "MYGENE" in task:
api_name = "mygene"
elif "PUBMED" in task:
api_name = "PubMed"
else:
print(f"Error. Tool not found in task: {task}")
return None
api_info = api_info_mapping[api_name]
prompt = f"""You have access to query the {api_name} API. If a task starts with '{api_name.upper()}:' then you should create the code to query the {api_name} API based off the documentation and return the code to complete your task. If you use the {api_name} API, do not answer with words, simply write the parameters used to call the function then cease output. Be sure it is valid python that will execute in a python interpreter.
---
Here is the {api_name} documentation
{api_info}
---
You should change the parameters to fit your specific task.
""".strip()
return prompt
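# Illustrative sketch (hypothetical helper and task string, not in the original module):
# tasks are expected to name their tool in upper case, which selects the API docs above.
def _example_generate_tool_prompt():
    return generate_tool_prompt("PUBMED: find recent papers on BRCA1 variants")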
def get_ada_embedding(text):
ada_embedding_max_size = 8191
text = text.replace("\n", " ")
if num_tokens_from_string(text) > ada_embedding_max_size:
# There must be a better way to do this.
text = text[:ada_embedding_max_size]
return openai.Embedding.create(input=[text], model="text-embedding-ada-002")[
"data"
][0]["embedding"]
def insert_doc_llama_index(index, doc_id, data, metadata={}, embedding=None):
if not embedding:
embedding = get_ada_embedding(data)
doc = Document(text=data, embedding=embedding, doc_id=doc_id, metadata=metadata)
doc.excluded_llm_metadata_keys = ['citation_data']
doc.excluded_embed_metadata_keys = ['citation_data']
index.insert(doc)
def handle_python_result(result, cache, task, doc_store, doc_store_task_key):
results_returned = True
params = result
doc_store["tasks"][doc_store_task_key]["result_code"] = result
tool = task.split(":")[0]
if tool == "MYGENE":
result = "from api.mygene_wrapper import mygene_wrapper\n" + result + "\nret = mygene_wrapper(query_term, size, from_)"
elif tool == "MYVARIANT":
result = "from api.myvariant_wrapper import myvariant_wrapper\n" + result + "\nret = myvariant_wrapper(query_term)"
elif tool == "PUBMED":
result = "from api.pubmed_wrapper import pubmed_wrapper\n" + result + "\nret = pubmed_wrapper(query_term, retmax, retstart)"
executed_result = execute_python(result)
if type(executed_result) is list:
executed_result = list(filter(lambda x : x, executed_result))
    if (executed_result is not None) and (not executed_result): # Execution completed successfully, but the executed result was an empty list
results_returned = False
result = "NOTE: Code returned no results\n\n" + result
print(Fore.BLUE + f"\nTask '{task}' completed but returned no results")
if "MYVARIANT" in task:
if results_returned:
cache["MYVARIANT"].append(f"---\n{params}---\n")
else:
cache["MYVARIANT"].append(f"---\nNote: This call returned no results\n{params}---\n")
processed_result = process_myvariant_result(executed_result)
if "MYGENE" in task:
if results_returned:
cache["MYGENE"].append(f"---\n{params}---\n")
else:
cache["MYGENE"].append(f"---\nNote: This call returned no results\n{params}---\n")
processed_result = process_mygene_result(executed_result)
if "PUBMED" in task:
if results_returned:
cache["PUBMED"].append(f"---\n{params}---\n")
else:
cache["PUBMED"].append(f"---\nNote: This call returned no results\n{params}---\n")
processed_result = process_pubmed_result(executed_result)
if executed_result is None:
        result = "NOTE: Code did not run successfully\n\n" + result
        print(Fore.BLUE + f"Task '{task}' failed. Code {result} did not run successfully.")
        if "MYGENE" in task:
            cache["MYGENE"].append(f"---\nNote: This call did not run successfully\n{params}---\n")
        if "PUBMED" in task:
            cache["PUBMED"].append(f"---\nNote: This call did not run successfully\n{params}---\n")
        if "MYVARIANT" in task:
            cache["MYVARIANT"].append(f"---\nNote: This call did not run successfully\n{params}---\n")
return
return processed_result
def handle_results(result, index, doc_store, doc_store_key, task_id_counter, RESULT_CUTOFF):
for i, r in enumerate(result):
res, metadata = r[0], r[1]
res = str(res)[
:RESULT_CUTOFF
] # Occasionally an enormous result will slow the program to a halt. Not ideal to lose results but putting in place for now.
vectorized_data = get_ada_embedding(res)
task_id = f"doc_id_{task_id_counter}_{i}"
insert_doc_llama_index(index=index, doc_id=task_id, data=res, metadata=metadata, embedding=vectorized_data)
doc_store["tasks"][doc_store_key]["results"].append(
{
"task_id_counter": task_id_counter,
"vectorized_data": vectorized_data,
"output": res,
"metadata": metadata,
}
)
def query_knowledge_base(
index,
query="Give a detailed but terse overview of all the information. Start with a high level summary and then go into details. Do not include any further instruction. Do not include filler words.",
response_mode="tree_summarize",
top_k=50,
list_index=False
):
if not index.docstore.docs:
print(Fore.RED + "NO INFORMATION IN LLAMA INDEX")
return
# configure retriever
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=top_k,
)
# configure response synthesizer
response_synthesizer = ResponseSynthesizer.from_args(
response_mode="tree_summarize",
)
# assemble query engine
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
)
if list_index:
query_response = index.query(
query, response_mode="default"
)
else:
# From llama index docs: Empirically, setting response_mode="tree_summarize" also leads to better summarization results.
query_response = query_engine.query(query)
extra_info = ""
if query_response.metadata:
try:
extra_info = [x.get("citation_data") for x in query_response.metadata.values()]
if not any(extra_info):
extra_info = []
except Exception as e:
print("Issue getting extra info from llama index")
return query_response.response, '\n\n'.join(extra_info)
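# Illustrative sketch (hypothetical helper and query string, not in the original module):
# assumes the index already holds documents, otherwise query_knowledge_base returns None.
def _example_query_knowledge_base(index):
    response_text, citations = query_knowledge_base(index=index, query="Summarize the key findings.", top_k=20)
    return response_text, citations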
def create_index(api_key,summaries=[], temperature=0.0, model_name="gpt-3.5-turbo-16k", max_tokens=6000):
llm_predictor = LLMPredictor(
llm=ChatOpenAI(
temperature=temperature,
openai_api_key=api_key,
model_name=model_name,
max_tokens=max_tokens,
)
)
documents = []
for i, summary in enumerate(summaries):
doc = Document(text=summary, doc_id=str(i))
doc.excluded_llm_metadata_keys = ['citation_data']
doc.excluded_embed_metadata_keys = ['citation_data']
documents.append(doc)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size=4000)
return GPTVectorStoreIndex(documents, service_context=service_context)
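# Illustrative sketch (hypothetical helper and summary strings, not in the original module):
# builds a small vector index over plain-text summaries using the module-level api_key.
def _example_create_index():
    summaries = ["TP53 is a tumor suppressor frequently mutated in cancer.", "APOE variants are associated with Alzheimer's disease."]
    return create_index(api_key, summaries=summaries)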
def create_graph_index(api_key, indicies=[], summaries=[], temperature=0.0, model_name="text-davinci-003", max_tokens=2000):
llm_predictor = LLMPredictor(
llm=OpenAI(
temperature=temperature,
openai_api_key=api_key,
model_name=model_name,
max_tokens=max_tokens,
)
)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
graph = ComposableGraph.from_indices(
GPTListIndex,
indicies,
index_summaries=summaries,
service_context=service_context
)
return graph
def create_list_index(api_key, summaries=[], temperature=0.0, model_name="text-davinci-003", max_tokens=2000):
llm_predictor = LLMPredictor(
llm=OpenAI(
temperature=temperature,
openai_api_key=api_key,
model_name=model_name,
max_tokens=max_tokens,
)
)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
documents = []
for i, summary in enumerate(summaries):
documents.append(Document(text=summary, doc_id=str(i)))
index = GPTListIndex.from_documents(documents, service_context=service_context)
return index
@backoff.on_exception(
partial(backoff.expo, max_value=50),
(openai.error.RateLimitError, openai.error.APIError, openai.error.APIConnectionError, openai.error.ServiceUnavailableError, openai.error.Timeout),
)
def get_gpt_completion(
prompt,
temp=0.0,
engine="text-davinci-003",
top_p=1,
frequency_penalty=0,
presence_penalty=0,
):
response = openai.Completion.create(
engine=engine,
prompt=prompt,
temperature=temp,
max_tokens=get_max_completion_len(prompt),
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
)
return response.choices[0].text.strip()
@backoff.on_exception(
partial(backoff.expo, max_value=50),
(openai.error.RateLimitError, openai.error.APIError, openai.error.APIConnectionError, openai.error.ServiceUnavailableError, openai.error.Timeout),
)
def get_gpt_chat_completion(
system_prompt, user_prompt, model="gpt-3.5-turbo-16k", temp=0.0
):
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
],
temperature=temp,
)
return response.choices[0]["message"]["content"].strip()
### FILE UTILS ###
def make_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def write_file(path, contents, mode="w"):
with open(path, mode) as f:
f.write(contents)
def read_file(path, mode="r"):
with open(path, mode) as f:
contents = f.read()
if not contents:
print(f"WARNING: file {path} empty")
return contents
def sanitize_dir_name(dir_name):
# Remove invalid characters
dir_name = re.sub(r'[<>:"/\|?*]', '_', dir_name)
dir_name = dir_name.replace(' ', '_')
# Remove leading period
if dir_name.startswith('.'):
dir_name = dir_name[1:]
return dir_name
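# Illustrative sketch (hypothetical helper, not in the original module): characters that
# are invalid in directory names are replaced before results are written to disk.
def _example_sanitize_dir_name():
    return sanitize_dir_name("What drives melanoma?")  # -> "What_drives_melanoma_"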
def save(
index,
doc_store,
OBJECTIVE,
current_datetime,
task_id_counter,
task_list,
completed_tasks,
cache,
reload_count,
summaries,
):
# Make basepath.
path = os.path.join("./out", sanitize_dir_name(OBJECTIVE) + "_" + current_datetime)
make_dir(path)
# Save llama index.
index.storage_context.persist(persist_dir=os.path.join(path, "index.json"))
# Save program state.
state = {
"summaries": summaries,
"reload_count": reload_count,
"task_id_counter": task_id_counter,
"task_list": list(task_list),
"completed_tasks": completed_tasks,
"cache": dict(cache),
"current_datetime": current_datetime,
"objective": OBJECTIVE,
}
with open(os.path.join(path, "state.json"), "w") as outfile:
json.dump(state, outfile)
# Save results.
if "key_results" in doc_store:
if reload_count:
new_time = str(time.strftime("%Y-%m-%d_%H-%M-%S"))
header = f"# {OBJECTIVE}\nDate: {new_time}\n\n"
else:
header = f"# {OBJECTIVE}\nDate: {current_datetime}\n\n"
key_findings_path = os.path.join(path, f"key_findings_{reload_count}.md")
write_file(key_findings_path, header, mode="a+")
for res in doc_store["key_results"]:
content = f"{res[0]}{res[1]}"
write_file(key_findings_path, content, mode="a+")
for task, doc in doc_store["tasks"].items():
doc_path = os.path.join(path, task)
make_dir(doc_path)
result_path = os.path.join(doc_path, "results")
make_dir(result_path)
if "executive_summary" in doc:
write_file(os.path.join(result_path, "executive_summary.txt"), doc["executive_summary"])
if "result_code" in doc:
write_file(os.path.join(result_path, "api_call.txt"), doc["result_code"])
for i, result in enumerate(doc["results"]):
result_path_i = os.path.join(result_path, str(i))
make_dir(result_path_i)
write_file(os.path.join(result_path_i, "output.txt"), result["output"])
write_file(
os.path.join(result_path_i, "vector.txt"),
str(result["vectorized_data"]),
)
def load(path):
llm_predictor = LLMPredictor(
llm=ChatOpenAI(
temperature=0,
openai_api_key=api_key,
model_name="gpt-3.5-turbo-16k",
max_tokens=6000,
)
)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size=4000)
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir=os.path.join(path, "index.json"))
index = load_index_from_storage(storage_context=storage_context, service_context=service_context)
state_path = os.path.join(path, "state.json")
if os.path.exists(state_path):
with open(state_path, "r") as f:
json_data = json.load(f)
try:
reload_count = json_data["reload_count"] + 1
task_id_counter = json_data["task_id_counter"]
task_list = json_data["task_list"]
completed_tasks = json_data["completed_tasks"]
cache = defaultdict(list, json_data["cache"])
current_datetime = json_data["current_datetime"]
objective = json_data["objective"]
summaries = json_data["summaries"]
except KeyError as e:
raise Exception(
f"Missing key '{e.args[0]}' in JSON file at path '{state_path}'"
)
return (
index,
task_id_counter,
deque(task_list),
completed_tasks,
cache,
current_datetime,
objective,
reload_count,
summaries,
)
| [
"llama_index.ResponseSynthesizer.from_args",
"llama_index.indices.composability.ComposableGraph.from_indices",
"llama_index.query_engine.RetrieverQueryEngine",
"llama_index.ServiceContext.from_defaults",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.GPTVectorStoreIndex",
"llama_index.GPTListIndex.from_documents",
"llama_index.load_index_from_storage",
"llama_index.Document"
] | [((3024, 3060), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (3045, 3060), False, 'import tiktoken\n'), ((14555, 14629), 'llama_index.Document', 'Document', ([], {'text': 'data', 'embedding': 'embedding', 'doc_id': 'doc_id', 'metadata': 'metadata'}), '(text=data, embedding=embedding, doc_id=doc_id, metadata=metadata)\n', (14563, 14629), False, 'from llama_index import Document, GPTVectorStoreIndex, LLMPredictor, ServiceContext, GPTListIndex\n'), ((18745, 18802), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': 'top_k'}), '(index=index, similarity_top_k=top_k)\n', (18765, 18802), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((18892, 18953), 'llama_index.ResponseSynthesizer.from_args', 'ResponseSynthesizer.from_args', ([], {'response_mode': '"""tree_summarize"""'}), "(response_mode='tree_summarize')\n", (18921, 18953), False, 'from llama_index import VectorStoreIndex, ResponseSynthesizer\n'), ((19017, 19106), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer)\n', (19037, 19106), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((20428, 20502), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(4000)'}), '(llm_predictor=llm_predictor, chunk_size=4000)\n', (20456, 20502), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((20514, 20577), 'llama_index.GPTVectorStoreIndex', 'GPTVectorStoreIndex', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (20533, 20577), False, 'from llama_index import Document, GPTVectorStoreIndex, LLMPredictor, ServiceContext, GPTListIndex\n'), ((20944, 21001), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (20972, 21001), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((21016, 21133), 'llama_index.indices.composability.ComposableGraph.from_indices', 'ComposableGraph.from_indices', (['GPTListIndex', 'indicies'], {'index_summaries': 'summaries', 'service_context': 'service_context'}), '(GPTListIndex, indicies, index_summaries=\n summaries, service_context=service_context)\n', (21044, 21133), False, 'from llama_index.indices.composability import ComposableGraph\n'), ((21537, 21594), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (21565, 21594), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((21743, 21814), 'llama_index.GPTListIndex.from_documents', 'GPTListIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (21770, 21814), False, 'from llama_index import Document, GPTVectorStoreIndex, LLMPredictor, ServiceContext, GPTListIndex\n'), ((21861, 21896), 'functools.partial', 'partial', (['backoff.expo'], {'max_value': '(50)'}), '(backoff.expo, max_value=50)\n', (21868, 21896), False, 'from functools import partial\n'), ((22854, 23020), 'openai.ChatCompletion.create', 
'openai.ChatCompletion.create', ([], {'model': 'model', 'messages': "[{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content':\n user_prompt}]", 'temperature': 'temp'}), "(model=model, messages=[{'role': 'system',\n 'content': system_prompt}, {'role': 'user', 'content': user_prompt}],\n temperature=temp)\n", (22882, 23020), False, 'import openai\n'), ((22549, 22584), 'functools.partial', 'partial', (['backoff.expo'], {'max_value': '(50)'}), '(backoff.expo, max_value=50)\n', (22556, 22584), False, 'from functools import partial\n'), ((23607, 23644), 're.sub', 're.sub', (['"""[<>:"/\\\\|?*]"""', '"""_"""', 'dir_name'], {}), '(\'[<>:"/\\\\|?*]\', \'_\', dir_name)\n', (23613, 23644), False, 'import re\n'), ((26372, 26446), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(4000)'}), '(llm_predictor=llm_predictor, chunk_size=4000)\n', (26400, 26446), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((26588, 26682), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(storage_context=storage_context, service_context=\n service_context)\n', (26611, 26682), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((26695, 26727), 'os.path.join', 'os.path.join', (['path', '"""state.json"""'], {}), "(path, 'state.json')\n", (26707, 26727), False, 'import os\n'), ((26735, 26761), 'os.path.exists', 'os.path.exists', (['state_path'], {}), '(state_path)\n', (26749, 26761), False, 'import os\n'), ((935, 967), 'logging.getLogger', 'logging.getLogger', (['"""llama_index"""'], {}), "('llama_index')\n", (952, 967), False, 'import logging\n'), ((2855, 2871), 'collections.deque', 'deque', (['task_list'], {}), '(task_list)\n', (2860, 2871), False, 'from collections import defaultdict, deque\n'), ((10387, 10408), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['result'], {}), '(result)\n', (10400, 10408), True, 'import xml.etree.ElementTree as ET\n'), ((23194, 23214), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (23208, 23214), False, 'import os\n'), ((23224, 23241), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (23235, 23241), False, 'import os\n'), ((24651, 24676), 'json.dump', 'json.dump', (['state', 'outfile'], {}), '(state, outfile)\n', (24660, 24676), False, 'import json\n'), ((24991, 25044), 'os.path.join', 'os.path.join', (['path', 'f"""key_findings_{reload_count}.md"""'], {}), "(path, f'key_findings_{reload_count}.md')\n", (25003, 25044), False, 'import os\n'), ((25334, 25358), 'os.path.join', 'os.path.join', (['path', 'task'], {}), '(path, task)\n', (25346, 25358), False, 'import os\n'), ((25408, 25441), 'os.path.join', 'os.path.join', (['doc_path', '"""results"""'], {}), "(doc_path, 'results')\n", (25420, 25441), False, 'import os\n'), ((27558, 27574), 'collections.deque', 'deque', (['task_list'], {}), '(task_list)\n', (27563, 27574), False, 'from collections import defaultdict, deque\n'), ((4361, 4383), 'markdown.markdown', 'markdown.markdown', (['res'], {}), '(res)\n', (4378, 4383), False, 'import markdown\n'), ((4411, 4443), 'markdown.markdown', 'markdown.markdown', (['citation_data'], {}), '(citation_data)\n', (4428, 4443), False, 'import markdown\n'), ((19965, 20075), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'temperature', 'openai_api_key': 'api_key', 
'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, openai_api_key=api_key, model_name=\n model_name, max_tokens=max_tokens)\n', (19975, 20075), False, 'from langchain.chat_models import ChatOpenAI\n'), ((20751, 20857), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': 'temperature', 'openai_api_key': 'api_key', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, openai_api_key=api_key, model_name=\n model_name, max_tokens=max_tokens)\n', (20757, 20857), False, 'from langchain import OpenAI\n'), ((21344, 21450), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': 'temperature', 'openai_api_key': 'api_key', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, openai_api_key=api_key, model_name=\n model_name, max_tokens=max_tokens)\n', (21350, 21450), False, 'from langchain import OpenAI\n'), ((24193, 24225), 'os.path.join', 'os.path.join', (['path', '"""index.json"""'], {}), "(path, 'index.json')\n", (24205, 24225), False, 'import os\n'), ((24592, 24624), 'os.path.join', 'os.path.join', (['path', '"""state.json"""'], {}), "(path, 'state.json')\n", (24604, 24624), False, 'import os\n'), ((26186, 26289), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'openai_api_key': 'api_key', 'model_name': '"""gpt-3.5-turbo-16k"""', 'max_tokens': '(6000)'}), "(temperature=0, openai_api_key=api_key, model_name=\n 'gpt-3.5-turbo-16k', max_tokens=6000)\n", (26196, 26289), False, 'from langchain.chat_models import ChatOpenAI\n'), ((26541, 26573), 'os.path.join', 'os.path.join', (['path', '"""index.json"""'], {}), "(path, 'index.json')\n", (26553, 26573), False, 'import os\n'), ((26828, 26840), 'json.load', 'json.load', (['f'], {}), '(f)\n', (26837, 26840), False, 'import json\n'), ((14291, 14360), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': '[text]', 'model': '"""text-embedding-ada-002"""'}), "(input=[text], model='text-embedding-ada-002')\n", (14314, 14360), False, 'import openai\n'), ((24785, 24819), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%H-%M-%S"""'], {}), "('%Y-%m-%d_%H-%M-%S')\n", (24798, 24819), False, 'import time\n'), ((25535, 25585), 'os.path.join', 'os.path.join', (['result_path', '"""executive_summary.txt"""'], {}), "(result_path, 'executive_summary.txt')\n", (25547, 25585), False, 'import os\n'), ((25669, 25710), 'os.path.join', 'os.path.join', (['result_path', '"""api_call.txt"""'], {}), "(result_path, 'api_call.txt')\n", (25681, 25710), False, 'import os\n'), ((25916, 25957), 'os.path.join', 'os.path.join', (['result_path_i', '"""output.txt"""'], {}), "(result_path_i, 'output.txt')\n", (25928, 25957), False, 'import os\n'), ((26017, 26058), 'os.path.join', 'os.path.join', (['result_path_i', '"""vector.txt"""'], {}), "(result_path_i, 'vector.txt')\n", (26029, 26058), False, 'import os\n'), ((27121, 27158), 'collections.defaultdict', 'defaultdict', (['list', "json_data['cache']"], {}), "(list, json_data['cache'])\n", (27132, 27158), False, 'from collections import defaultdict, deque\n'), ((12834, 12933), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-003"""', 'prompt': 'validation_prompt', 'temperature': '(0.0)'}), "(engine='text-davinci-003', prompt=\n validation_prompt, temperature=0.0)\n", (12858, 12933), False, 'import openai\n')] |
"""Global eval handlers."""
from typing import Any
from llama_index.legacy.callbacks.argilla_callback import argilla_callback_handler
from llama_index.legacy.callbacks.arize_phoenix_callback import (
arize_phoenix_callback_handler,
)
from llama_index.legacy.callbacks.base_handler import BaseCallbackHandler
from llama_index.legacy.callbacks.deepeval_callback import deepeval_callback_handler
from llama_index.legacy.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.legacy.callbacks.open_inference_callback import (
OpenInferenceCallbackHandler,
)
from llama_index.legacy.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.legacy.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.legacy.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index.legacy
llama_index.legacy.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
elif eval_mode == "argilla":
handler = argilla_callback_handler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
| [
"llama_index.legacy.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.legacy.callbacks.promptlayer_handler.PromptLayerHandler",
"llama_index.legacy.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.legacy.callbacks.argilla_callback.argilla_callback_handler",
"llama_index.legacy.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.legacy.callbacks.deepeval_callback.deepeval_callback_handler",
"llama_index.legacy.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.legacy.callbacks.open_inference_callback.OpenInferenceCallbackHandler"
] | [((1239, 1274), 'llama_index.legacy.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1259, 1274), False, 'from llama_index.legacy.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1332, 1375), 'llama_index.legacy.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1360, 1375), False, 'from llama_index.legacy.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1433, 1478), 'llama_index.legacy.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1463, 1478), False, 'from llama_index.legacy.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1532, 1573), 'llama_index.legacy.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1558, 1573), False, 'from llama_index.legacy.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1629, 1662), 'llama_index.legacy.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1647, 1662), False, 'from llama_index.legacy.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1715, 1755), 'llama_index.legacy.callbacks.deepeval_callback.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (1740, 1755), False, 'from llama_index.legacy.callbacks.deepeval_callback import deepeval_callback_handler\n'), ((1806, 1837), 'llama_index.legacy.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1822, 1837), False, 'from llama_index.legacy.callbacks.simple_llm_handler import SimpleLLMHandler\n'), ((1889, 1928), 'llama_index.legacy.callbacks.argilla_callback.argilla_callback_handler', 'argilla_callback_handler', ([], {}), '(**eval_params)\n', (1913, 1928), False, 'from llama_index.legacy.callbacks.argilla_callback import argilla_callback_handler\n')] |
import sys
from langchain import OpenAI
from pathlib import Path
import llama_index as li
#from llamahub.connectors import TextFileConnector
from llama_index import SimpleDirectoryReader,GPTListIndex,LLMPredictor
file_name = sys.argv[1]
llm_predictor = LLMPredictor(llm=OpenAI(model_name="gpt-3.5-turbo")) #temperature=0,
docs = SimpleDirectoryReader('.', [file_name]).load_data()
index = GPTListIndex(docs)
ex = """Today we finish off our study of collaborative filtering by looking closely at embeddings—a critical building block of many deep learning algorithms. Then we’ll dive into convolutional neural networks (CNNs) and see how they really work. We’ve used plenty of CNNs through this course, but we haven’t peeked inside them to see what’s really going on in there. As well as learning about their most fundamental building block, the convolution, we’ll also look at pooling, dropout, and more."""
q = f"""Here's an example of a lesson summary from a previous fast.ai lesson: "{ex}" Write a four paragraph summary of the fast.ai lesson contained in the following transcript, using a similar informal writing style to the above summary from the previous lesson."""
summary = index.query(q, response_mode="tree_summarize", llm_predictor=llm_predictor)
Path(f'{Path(file_name).stem}-summ.txt').write_text(str(summary))
| [
"llama_index.GPTListIndex",
"llama_index.SimpleDirectoryReader"
] | [((391, 409), 'llama_index.GPTListIndex', 'GPTListIndex', (['docs'], {}), '(docs)\n', (403, 409), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, LLMPredictor\n'), ((271, 305), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""'}), "(model_name='gpt-3.5-turbo')\n", (277, 305), False, 'from langchain import OpenAI\n'), ((331, 370), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""."""', '[file_name]'], {}), "('.', [file_name])\n", (352, 370), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, LLMPredictor\n'), ((1270, 1285), 'pathlib.Path', 'Path', (['file_name'], {}), '(file_name)\n', (1274, 1285), False, 'from pathlib import Path\n')] |
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pkg_resources
import requests
from pkg_resources import DistributionNotFound
from llama_index.download.utils import (
get_exports,
get_file_content,
initialize_directory,
rewrite_exports,
)
LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main"
LLAMA_HUB_PATH = "/llama_hub"
LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH
PATH_TYPE = Union[str, Path]
logger = logging.getLogger(__name__)
LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads"
class MODULE_TYPE(str, Enum):
LOADER = "loader"
TOOL = "tool"
LLAMAPACK = "llamapack"
DATASETS = "datasets"
def get_module_info(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_class: str,
refresh_cache: bool = False,
library_path: str = "library.json",
disable_library_cache: bool = False,
) -> Dict:
"""Get module info."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
local_library_path = f"{local_dir_path}/{library_path}"
module_id = None # e.g. `web/simple_web`
extra_files = [] # e.g. `web/simple_web/utils.py`
# Check cache first
if not refresh_cache and os.path.exists(local_library_path):
with open(local_library_path) as f:
library = json.load(f)
if module_class in library:
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# Fetch up-to-date library from remote repo if module_id not found
if module_id is None:
library_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{library_path}"
)
library = json.loads(library_raw_content)
if module_class not in library:
raise ValueError("Loader class name not found in library")
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# create cache dir if needed
local_library_dir = os.path.dirname(local_library_path)
if not disable_library_cache:
if not os.path.exists(local_library_dir):
os.makedirs(local_library_dir)
# Update cache
with open(local_library_path, "w") as f:
f.write(library_raw_content)
if module_id is None:
raise ValueError("Loader class name not found in library")
return {
"module_id": module_id,
"extra_files": extra_files,
}
def download_module_and_reqs(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_id: str,
extra_files: List[str],
refresh_cache: bool = False,
use_gpt_index_import: bool = False,
base_file_name: str = "base.py",
override_path: bool = False,
) -> None:
"""Load module."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
if override_path:
module_path = str(local_dir_path)
else:
module_path = f"{local_dir_path}/{module_id}"
if refresh_cache or not os.path.exists(module_path):
os.makedirs(module_path, exist_ok=True)
basepy_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{base_file_name}"
)
if use_gpt_index_import:
basepy_raw_content = basepy_raw_content.replace(
"import llama_index", "import llama_index"
)
basepy_raw_content = basepy_raw_content.replace(
"from llama_index", "from llama_index"
)
with open(f"{module_path}/{base_file_name}", "w") as f:
f.write(basepy_raw_content)
# Get content of extra files if there are any
# and write them under the loader directory
for extra_file in extra_files:
extra_file_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{extra_file}"
)
# If the extra file is an __init__.py file, we need to
# add the exports to the __init__.py file in the modules directory
if extra_file == "__init__.py":
loader_exports = get_exports(extra_file_raw_content)
existing_exports = []
init_file_path = local_dir_path / "__init__.py"
# if the __init__.py file do not exists, we need to create it
mode = "a+" if not os.path.exists(init_file_path) else "r+"
with open(init_file_path, mode) as f:
f.write(f"from .{module_id} import {', '.join(loader_exports)}")
existing_exports = get_exports(f.read())
rewrite_exports(existing_exports + loader_exports, str(local_dir_path))
with open(f"{module_path}/{extra_file}", "w") as f:
f.write(extra_file_raw_content)
# install requirements
requirements_path = f"{local_dir_path}/requirements.txt"
if not os.path.exists(requirements_path):
# NOTE: need to check the status code
response_txt, status_code = get_file_content(
str(remote_dir_path), f"/{module_id}/requirements.txt"
)
if status_code == 200:
with open(requirements_path, "w") as f:
f.write(response_txt)
# Install dependencies if there are any and not already installed
if os.path.exists(requirements_path):
try:
requirements = pkg_resources.parse_requirements(
Path(requirements_path).open()
)
pkg_resources.require([str(r) for r in requirements])
except DistributionNotFound:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", requirements_path]
)
def download_llama_module(
module_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_dir: Optional[str] = None,
custom_path: Optional[str] = None,
library_path: str = "library.json",
base_file_name: str = "base.py",
use_gpt_index_import: bool = False,
disable_library_cache: bool = False,
override_path: bool = False,
skip_load: bool = False,
) -> Any:
"""Download a module from LlamaHub.
Can be a loader, tool, pack, or more.
Args:
        module_class: The name of the llama module class you want to download,
such as `GmailOpenAIAgentPack`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_dir: Custom dir name to download loader into (under parent folder).
custom_path: Custom dirpath to download loader into.
library_path: File name of the library file.
use_gpt_index_import: If true, the loader files will use
llama_index as the base dependency. By default (False),
the loader files use llama_index as the base dependency.
NOTE: this is a temporary workaround while we fully migrate all usages
to llama_index.
is_dataset: whether or not downloading a LlamaDataset
Returns:
A Loader, A Pack, An Agent, or A Dataset
"""
# create directory / get path
dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir)
# fetch info from library.json file
module_info = get_module_info(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_class=module_class,
refresh_cache=refresh_cache,
library_path=library_path,
disable_library_cache=disable_library_cache,
)
module_id = module_info["module_id"]
extra_files = module_info["extra_files"]
# download the module, install requirements
download_module_and_reqs(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_id=module_id,
extra_files=extra_files,
refresh_cache=refresh_cache,
use_gpt_index_import=use_gpt_index_import,
base_file_name=base_file_name,
override_path=override_path,
)
if skip_load:
return None
# loads the module into memory
if override_path:
path = f"{dirpath}/{base_file_name}"
spec = util.spec_from_file_location("custom_module", location=path)
if spec is None:
raise ValueError(f"Could not find file: {path}.")
else:
path = f"{dirpath}/{module_id}/{base_file_name}"
spec = util.spec_from_file_location("custom_module", location=path)
if spec is None:
raise ValueError(f"Could not find file: {path}.")
module = util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return getattr(module, module_class)
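# Illustrative sketch (hypothetical helper, not part of the original module): typical
# usage with the pack named in the docstring above. Calling this downloads code from
# LlamaHub and may pip-install its requirements.
def _example_download_llama_module():
    return download_llama_module("GmailOpenAIAgentPack", refresh_cache=True)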
def track_download(module_class: str, module_type: str) -> None:
"""Tracks number of downloads via Llamahub proxy.
Args:
module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`.
module_type: Can be "loader", "tool", "llamapack", or "datasets"
"""
try:
requests.post(
LLAMAHUB_ANALYTICS_PROXY_SERVER,
json={"type": module_type, "plugin": module_class},
)
except Exception as e:
logger.info(f"Error tracking downloads for {module_class} : {e}")
| [
"llama_index.download.utils.get_exports",
"llama_index.download.utils.initialize_directory"
] | [((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5550, 5583), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5564, 5583), False, 'import os\n'), ((7432, 7500), 'llama_index.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7452, 7500), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8830, 8857), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8851, 8857), False, 'from importlib import util\n'), ((1197, 1217), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1201, 1217), False, 'from pathlib import Path\n'), ((1434, 1468), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1448, 1468), False, 'import os\n'), ((1938, 1969), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1948, 1969), False, 'import json\n'), ((2263, 2298), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2278, 2298), False, 'import os\n'), ((3131, 3151), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3135, 3151), False, 'from pathlib import Path\n'), ((3347, 3386), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3358, 3386), False, 'import os\n'), ((5139, 5172), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5153, 5172), False, 'import os\n'), ((8438, 8498), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'path'}), "('custom_module', location=path)\n", (8466, 8498), False, 'from importlib import util\n'), ((8668, 8728), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'path'}), "('custom_module', location=path)\n", (8696, 8728), False, 'from importlib import util\n'), ((9280, 9382), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9293, 9382), False, 'import requests\n'), ((1536, 1548), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1545, 1548), False, 'import json\n'), ((3310, 3337), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3324, 3337), False, 'import os\n'), ((4385, 4420), 'llama_index.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4396, 4420), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2356, 2389), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2370, 2389), False, 'import os\n'), ((2407, 2437), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2418, 2437), False, 'import os\n'), ((5835, 5927), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n requirements_path])\n", (5856, 5927), False, 
'import subprocess\n'), ((4620, 4650), 'os.path.exists', 'os.path.exists', (['init_file_path'], {}), '(init_file_path)\n', (4634, 4650), False, 'import os\n'), ((5675, 5698), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5679, 5698), False, 'from pathlib import Path\n')] |
import logging
from dataclasses import dataclass
from typing import Any, List, Optional, cast
import llama_index
from llama_index.bridge.pydantic import BaseModel
from llama_index.callbacks.base import CallbackManager
from llama_index.core.embeddings.base import BaseEmbedding
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.llm_predictor import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.llms.llm import LLM
from llama_index.llms.utils import LLMType, resolve_llm
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser, TextSplitter
from llama_index.node_parser.text.sentence import (
DEFAULT_CHUNK_SIZE,
SENTENCE_CHUNK_OVERLAP,
SentenceSplitter,
)
from llama_index.prompts.base import BasePromptTemplate
from llama_index.schema import TransformComponent
from llama_index.types import PydanticProgramMode
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: int = DEFAULT_CHUNK_SIZE,
chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SentenceSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager or CallbackManager(),
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
class ServiceContextData(BaseModel):
llm: dict
llm_predictor: dict
prompt_helper: dict
embed_model: dict
transformations: List[dict]
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
transformations: List[TransformComponent]
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Any] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# pydantic program mode (used if output_cls is specified)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
system_prompt (Optional[str]): System-wide prompt to be prepended
to all input prompts, used to guide system "decision making"
query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
passed-in input queries.
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
from llama_index.embeddings.utils import EmbedType, resolve_embed_model
embed_model = cast(EmbedType, embed_model)
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm=llm,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
text_splitter=text_splitter,
llama_logger=llama_logger,
callback_manager=callback_manager,
context_window=context_window,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
chunk_overlap=chunk_overlap,
num_output=num_output,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
transformations=transformations,
)
callback_manager = callback_manager or CallbackManager([])
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm.system_prompt = llm.system_prompt or system_prompt
llm.query_wrapper_prompt = llm.query_wrapper_prompt or query_wrapper_prompt
llm.pydantic_program_mode = (
llm.pydantic_program_mode or pydantic_program_mode
)
if llm_predictor is not None:
print("LLMPredictor is deprecated, please use LLM instead.")
llm_predictor = llm_predictor or LLMPredictor(
llm=llm, pydantic_program_mode=pydantic_program_mode
)
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# NOTE: embed model should be a transformation, but the way the service
# context works, we can't put in there yet.
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or [node_parser]
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Any] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
from llama_index.embeddings.utils import EmbedType, resolve_embed_model
embed_model = cast(EmbedType, embed_model)
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size",
DeprecationWarning,
)
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# default to using the embed model passed from the service context
if embed_model == "default":
embed_model = service_context.embed_model
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or service_context.prompt_helper
if context_window is not None or num_output is not None:
prompt_helper = _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
transformations = transformations or []
node_parser_found = False
for transform in service_context.transformations:
if isinstance(transform, NodeParser):
node_parser_found = True
node_parser = transform
break
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
if not node_parser_found:
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or service_context.transformations
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
return self.llm_predictor.llm
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
for transform in self.transformations:
if isinstance(transform, NodeParser):
return transform
raise ValueError("No node parser found.")
def to_dict(self) -> dict:
"""Convert service context to dict."""
llm_dict = self.llm_predictor.llm.to_dict()
llm_predictor_dict = self.llm_predictor.to_dict()
embed_model_dict = self.embed_model.to_dict()
prompt_helper_dict = self.prompt_helper.to_dict()
tranform_list_dict = [x.to_dict() for x in self.transformations]
return ServiceContextData(
llm=llm_dict,
llm_predictor=llm_predictor_dict,
prompt_helper=prompt_helper_dict,
embed_model=embed_model_dict,
transformations=tranform_list_dict,
).dict()
@classmethod
def from_dict(cls, data: dict) -> "ServiceContext":
from llama_index.embeddings.loading import load_embed_model
from llama_index.extractors.loading import load_extractor
from llama_index.llm_predictor.loading import load_predictor
from llama_index.node_parser.loading import load_parser
service_context_data = ServiceContextData.parse_obj(data)
llm_predictor = load_predictor(service_context_data.llm_predictor)
embed_model = load_embed_model(service_context_data.embed_model)
prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)
transformations: List[TransformComponent] = []
for transform in service_context_data.transformations:
try:
transformations.append(load_parser(transform))
except ValueError:
transformations.append(load_extractor(transform))
return cls.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
transformations=transformations,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
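# Illustrative usage (editor's addition, not part of the original module): build a
# ServiceContext from defaults and install it globally so later index/query calls pick
# it up via llama_index.global_service_context. MockLLM and MockEmbedding are assumed
# to be importable in this llama_index version; swap in real models for actual use.
if __name__ == "__main__":
    from llama_index import MockEmbedding
    from llama_index.llms import MockLLM

    ctx = ServiceContext.from_defaults(
        llm=MockLLM(),                          # offline stand-in for a real LLM
        embed_model=MockEmbedding(embed_dim=384),  # offline stand-in for a real embedding model
        chunk_size=512,
        chunk_overlap=64,
    )
    set_global_service_context(ctx)
    print(type(ctx.node_parser).__name__)  # SentenceSplitter built with the sizes above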
| [
"llama_index.llm_predictor.loading.load_predictor",
"llama_index.embeddings.loading.load_embed_model",
"llama_index.logger.LlamaLogger",
"llama_index.llms.utils.resolve_llm",
"llama_index.node_parser.loading.load_parser",
"llama_index.callbacks.base.CallbackManager",
"llama_index.indices.prompt_helper.PromptHelper.from_dict",
"llama_index.extractors.loading.load_extractor",
"llama_index.llm_predictor.LLMPredictor",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.embeddings.utils.resolve_embed_model"
] | [((962, 989), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (979, 989), False, 'import logging\n'), ((1764, 1821), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1794, 1821), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((5128, 5156), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (5132, 5156), False, 'from typing import Any, List, Optional, cast\n'), ((7575, 7607), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7594, 7607), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((10019, 10047), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (10023, 10047), False, 'from typing import Any, List, Optional, cast\n'), ((11263, 11295), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (11282, 11295), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((14437, 14487), 'llama_index.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14451, 14487), False, 'from llama_index.llm_predictor.loading import load_predictor\n'), ((14511, 14561), 'llama_index.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14527, 14561), False, 'from llama_index.embeddings.loading import load_embed_model\n'), ((14587, 14645), 'llama_index.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (14609, 14645), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((6319, 6338), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6334, 6338), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((6506, 6522), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6517, 6522), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((6954, 7020), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (6966, 7020), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((8483, 8496), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8494, 8496), False, 'from llama_index.logger import LlamaLogger\n'), ((10558, 10574), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (10569, 10574), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((10603, 10624), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10615, 10624), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((1363, 1380), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1378, 1380), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((14821, 14843), 'llama_index.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (14832, 14843), False, 'from 
llama_index.node_parser.loading import load_parser\n'), ((14915, 14940), 'llama_index.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (14929, 14940), False, 'from llama_index.extractors.loading import load_extractor\n')] |
import logging
from dataclasses import dataclass
from typing import Any, List, Optional, cast
import llama_index
from llama_index.bridge.pydantic import BaseModel
from llama_index.callbacks.base import CallbackManager
from llama_index.core.embeddings.base import BaseEmbedding
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.llm_predictor import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.llms.llm import LLM
from llama_index.llms.utils import LLMType, resolve_llm
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser, TextSplitter
from llama_index.node_parser.text.sentence import (
DEFAULT_CHUNK_SIZE,
SENTENCE_CHUNK_OVERLAP,
SentenceSplitter,
)
from llama_index.prompts.base import BasePromptTemplate
from llama_index.schema import TransformComponent
from llama_index.types import PydanticProgramMode
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: int = DEFAULT_CHUNK_SIZE,
chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SentenceSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager or CallbackManager(),
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
class ServiceContextData(BaseModel):
llm: dict
llm_predictor: dict
prompt_helper: dict
embed_model: dict
transformations: List[dict]
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
transformations: List[TransformComponent]
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Any] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# pydantic program mode (used if output_cls is specified)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
system_prompt (Optional[str]): System-wide prompt to be prepended
to all input prompts, used to guide system "decision making"
query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
passed-in input queries.
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
from llama_index.embeddings.utils import EmbedType, resolve_embed_model
embed_model = cast(EmbedType, embed_model)
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm=llm,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
text_splitter=text_splitter,
llama_logger=llama_logger,
callback_manager=callback_manager,
context_window=context_window,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
chunk_overlap=chunk_overlap,
num_output=num_output,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
transformations=transformations,
)
callback_manager = callback_manager or CallbackManager([])
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm.system_prompt = llm.system_prompt or system_prompt
llm.query_wrapper_prompt = llm.query_wrapper_prompt or query_wrapper_prompt
llm.pydantic_program_mode = (
llm.pydantic_program_mode or pydantic_program_mode
)
if llm_predictor is not None:
print("LLMPredictor is deprecated, please use LLM instead.")
llm_predictor = llm_predictor or LLMPredictor(
llm=llm, pydantic_program_mode=pydantic_program_mode
)
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# NOTE: embed model should be a transformation, but the way the service
# context works, we can't put in there yet.
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or [node_parser]
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Any] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
from llama_index.embeddings.utils import EmbedType, resolve_embed_model
embed_model = cast(EmbedType, embed_model)
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size",
DeprecationWarning,
)
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# default to using the embed model passed from the service context
if embed_model == "default":
embed_model = service_context.embed_model
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or service_context.prompt_helper
if context_window is not None or num_output is not None:
prompt_helper = _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
transformations = transformations or []
node_parser_found = False
for transform in service_context.transformations:
if isinstance(transform, NodeParser):
node_parser_found = True
node_parser = transform
break
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
if not node_parser_found:
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or service_context.transformations
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
return self.llm_predictor.llm
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
for transform in self.transformations:
if isinstance(transform, NodeParser):
return transform
raise ValueError("No node parser found.")
def to_dict(self) -> dict:
"""Convert service context to dict."""
llm_dict = self.llm_predictor.llm.to_dict()
llm_predictor_dict = self.llm_predictor.to_dict()
embed_model_dict = self.embed_model.to_dict()
prompt_helper_dict = self.prompt_helper.to_dict()
tranform_list_dict = [x.to_dict() for x in self.transformations]
return ServiceContextData(
llm=llm_dict,
llm_predictor=llm_predictor_dict,
prompt_helper=prompt_helper_dict,
embed_model=embed_model_dict,
transformations=tranform_list_dict,
).dict()
@classmethod
def from_dict(cls, data: dict) -> "ServiceContext":
from llama_index.embeddings.loading import load_embed_model
from llama_index.extractors.loading import load_extractor
from llama_index.llm_predictor.loading import load_predictor
from llama_index.node_parser.loading import load_parser
service_context_data = ServiceContextData.parse_obj(data)
llm_predictor = load_predictor(service_context_data.llm_predictor)
embed_model = load_embed_model(service_context_data.embed_model)
prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)
transformations: List[TransformComponent] = []
for transform in service_context_data.transformations:
try:
transformations.append(load_parser(transform))
except ValueError:
transformations.append(load_extractor(transform))
return cls.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
transformations=transformations,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
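# Illustrative round-trip (editor's addition, not part of the original module): the
# to_dict()/from_dict() pair lets a ServiceContext configuration be stored as JSON and
# rebuilt later. Assumes the default OpenAI-backed LLM and embedding model can resolve
# in this environment (e.g. OPENAI_API_KEY is set).
if __name__ == "__main__":
    import json

    ctx = ServiceContext.from_defaults(chunk_size=256, chunk_overlap=32)
    payload = json.dumps(ctx.to_dict())  # llm, llm_predictor, prompt_helper, embed_model, transformations
    restored = ServiceContext.from_dict(json.loads(payload))
    print(restored.node_parser.chunk_size)  # 256, the chunking configuration survived the round trip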
| [
"llama_index.llm_predictor.loading.load_predictor",
"llama_index.embeddings.loading.load_embed_model",
"llama_index.logger.LlamaLogger",
"llama_index.llms.utils.resolve_llm",
"llama_index.node_parser.loading.load_parser",
"llama_index.callbacks.base.CallbackManager",
"llama_index.indices.prompt_helper.PromptHelper.from_dict",
"llama_index.extractors.loading.load_extractor",
"llama_index.llm_predictor.LLMPredictor",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.embeddings.utils.resolve_embed_model"
] | [((962, 989), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (979, 989), False, 'import logging\n'), ((1764, 1821), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1794, 1821), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((5128, 5156), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (5132, 5156), False, 'from typing import Any, List, Optional, cast\n'), ((7575, 7607), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7594, 7607), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((10019, 10047), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (10023, 10047), False, 'from typing import Any, List, Optional, cast\n'), ((11263, 11295), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (11282, 11295), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((14437, 14487), 'llama_index.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14451, 14487), False, 'from llama_index.llm_predictor.loading import load_predictor\n'), ((14511, 14561), 'llama_index.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14527, 14561), False, 'from llama_index.embeddings.loading import load_embed_model\n'), ((14587, 14645), 'llama_index.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (14609, 14645), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((6319, 6338), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6334, 6338), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((6506, 6522), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6517, 6522), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((6954, 7020), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (6966, 7020), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((8483, 8496), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8494, 8496), False, 'from llama_index.logger import LlamaLogger\n'), ((10558, 10574), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (10569, 10574), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((10603, 10624), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10615, 10624), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((1363, 1380), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1378, 1380), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((14821, 14843), 'llama_index.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (14832, 14843), False, 'from 
llama_index.node_parser.loading import load_parser\n'), ((14915, 14940), 'llama_index.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (14929, 14940), False, 'from llama_index.extractors.loading import load_extractor\n')] |
import utils
import os
import requests
import llama_index
import torch
import llama_cpp
from llama_index import SimpleDirectoryReader
from llama_index import Document
from llama_index import VectorStoreIndex
from llama_index import ServiceContext
from llama_index import LLMPredictor
# Params
llama = True
### Get data
dirpath = 'related_works/Cloud_VM/'
filename = dirpath + 'ey.pdf'
url = 'https://assets.ey.com/content/dam/ey-sites/ey-com/nl_nl/topics/jaarverslag/downloads-pdfs/2022-2023/ey-nl-financial-statements-2023-en.pdf'
if not os.path.exists(filename):
print(f"Downloading {filename} from {url}...")
response = requests.get(url)
with open(dirpath + 'ey.pdf', 'wb') as f:
f.write(response.content)
documents = SimpleDirectoryReader(
input_files=[filename]
).load_data()
### Print data
print(type(documents), "\n")
print(len(documents), "\n")
print(type(documents[0]))
print(documents[0])
### Create doc object
document = Document(text="\n\n".join([doc.text for doc in documents]))
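# Editor's note: SimpleDirectoryReader typically returns one Document per PDF page, so the
# pages are merged here into a single Document; splitting into chunks/nodes is left to the
# node parser configured on the ServiceContext when the index is built below.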
### load model
model_name_or_path = "TheBloke/Llama-2-13B-chat-GGML"
model_basename = "llama-2-13b-chat.ggmlv3.q5_1.bin" # the model is in bin format
from huggingface_hub import hf_hub_download
model_path = hf_hub_download(repo_id=model_name_or_path, filename=model_basename)
if llama:
# GPU
from llama_cpp import Llama
llm = None
llm = Llama(
model_path=model_path,
n_threads=2, # CPU cores
n_batch=512, # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU.
n_gpu_layers=43, # Change this value based on your model and your GPU VRAM pool.
n_ctx=4096, # Context window
)
else:
from transformers import LlamaTokenizer, LlamaForCausalLM
tokenizer = LlamaTokenizer.from_pretrained('ChanceFocus/finma-7b-full')
llm = LlamaForCausalLM.from_pretrained('ChanceFocus/finma-7b-full', device_map='auto')
##### The replicate endpoint
from llama_index.llms import Replicate
from llama_index import ServiceContext, set_global_service_context
from llama_index.llms.llama_utils import (
messages_to_prompt,
completion_to_prompt,
)
LLAMA_13B_V2_CHAT = "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5"
# inject custom system prompt into llama-2
def custom_completion_to_prompt(completion: str) -> str:
return completion_to_prompt(
completion,
system_prompt=(
"You are a Q&A assistant. Your goal is to answer questions as "
"accurately as possible is the instructions and context provided."
),
)
llm = Replicate(
model=LLAMA_13B_V2_CHAT,
temperature=0.01,
# override max tokens since it's interpreted
# as context window instead of max tokens
context_window=4096,
# override completion representation for llama 2
completion_to_prompt=custom_completion_to_prompt,
# if using llama 2 for data agents, also override the message representation
messages_to_prompt=messages_to_prompt,
)
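# NOTE (editor's comment): this reassignment replaces the llama.cpp / Hugging Face model
# loaded above, so those local weights are not used by anything below; every index and
# query engine from here on calls the Replicate-hosted Llama 2 endpoint instead.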
service_context = ServiceContext.from_defaults(
llm=llm, embed_model="local:BAAI/bge-small-en-v1.5"
)
index = VectorStoreIndex.from_documents([document],
service_context=service_context)
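# Optional (editor's note): the index is re-embedded from scratch on every run. It could be
# persisted and reloaded instead, roughly like this (load_index_from_storage and
# StorageContext would need to be imported from llama_index):
#   index.storage_context.persist(persist_dir="./ey_index")
#   storage_context = StorageContext.from_defaults(persist_dir="./ey_index")
#   index = load_index_from_storage(storage_context, service_context=service_context)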
query_engine = index.as_query_engine()
response = query_engine.query(
"What actions is Ernst & Young Global Limited taking to address climate change issues?"
)
print(str(response))
# ## Evaluation setup using TruLens
eval_questions = []
with open('eval_questions.txt', 'r') as file:
for line in file:
        # Strip the trailing newline character
item = line.strip()
print(item)
eval_questions.append(item)
# You can try your own question:
new_question = "What is the right AI job for me?"
eval_questions.append(new_question)
print(eval_questions)
from trulens_eval import Tru
tru = Tru()
tru.reset_database()
from utils import get_prebuilt_trulens_recorder
tru_recorder = get_prebuilt_trulens_recorder(query_engine,
app_id="Direct Query Engine")
with tru_recorder as recording:
for question in eval_questions:
response = query_engine.query(question)
records, feedback = tru.get_records_and_feedback(app_ids=[])
records.head()
# launches on http://localhost:8501/
tru.run_dashboard()
# ## Advanced RAG pipeline
# ### 1. Sentence Window retrieval
from utils import build_sentence_window_index
sentence_index = build_sentence_window_index(
document,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
save_dir="sentence_index"
)
from utils import get_sentence_window_query_engine
sentence_window_engine = get_sentence_window_query_engine(sentence_index)
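# Rough sketch of what build_sentence_window_index / get_sentence_window_query_engine
# typically do (editor's addition; the real utils module is not shown here, and the exact
# parameters below are assumptions based on llama_index 0.9-era APIs). Kept as comments so
# the script's behaviour is unchanged:
#
#   from llama_index.node_parser import SentenceWindowNodeParser
#   from llama_index.indices.postprocessor import MetadataReplacementPostProcessor
#
#   node_parser = SentenceWindowNodeParser.from_defaults(
#       window_size=3,
#       window_metadata_key="window",
#       original_text_metadata_key="original_text",
#   )
#   ctx = ServiceContext.from_defaults(
#       llm=llm, embed_model="local:BAAI/bge-small-en-v1.5", node_parser=node_parser
#   )
#   sentence_index = VectorStoreIndex.from_documents([document], service_context=ctx)
#   sentence_window_engine = sentence_index.as_query_engine(
#       similarity_top_k=6,
#       # replace each retrieved sentence with its surrounding window before synthesis
#       node_postprocessors=[MetadataReplacementPostProcessor(target_metadata_key="window")],
#   )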
window_response = sentence_window_engine.query(
"how do I get started on a personal project in AI?"
)
print(str(window_response))
tru.reset_database()
tru_recorder_sentence_window = get_prebuilt_trulens_recorder(
sentence_window_engine,
app_id = "Sentence Window Query Engine"
)
for question in eval_questions:
with tru_recorder_sentence_window as recording:
response = sentence_window_engine.query(question)
print(question)
print(str(response))
tru.get_leaderboard(app_ids=[])
# launches on http://localhost:8501/
tru.run_dashboard()
# ### 2. Auto-merging retrieval
from utils import build_automerging_index
automerging_index = build_automerging_index(
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
save_dir="merging_index"
)
from utils import get_automerging_query_engine
automerging_query_engine = get_automerging_query_engine(
automerging_index,
)
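# Rough sketch of what build_automerging_index / get_automerging_query_engine typically do
# (editor's addition; the real utils module is not shown, and the chunk sizes / top-k values
# below are assumptions based on llama_index 0.9-era APIs). Kept as comments so the script's
# behaviour is unchanged:
#
#   from llama_index import StorageContext
#   from llama_index.node_parser import HierarchicalNodeParser, get_leaf_nodes
#   from llama_index.retrievers import AutoMergingRetriever
#   from llama_index.query_engine import RetrieverQueryEngine
#
#   node_parser = HierarchicalNodeParser.from_defaults(chunk_sizes=[2048, 512, 128])
#   nodes = node_parser.get_nodes_from_documents(documents)
#   storage_context = StorageContext.from_defaults()
#   storage_context.docstore.add_documents(nodes)
#   automerging_index = VectorStoreIndex(
#       get_leaf_nodes(nodes), storage_context=storage_context, service_context=service_context
#   )
#   retriever = AutoMergingRetriever(
#       automerging_index.as_retriever(similarity_top_k=12), storage_context, verbose=True
#   )
#   automerging_query_engine = RetrieverQueryEngine.from_args(retriever)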
auto_merging_response = automerging_query_engine.query(
"How do I build a portfolio of AI projects?"
)
print(str(auto_merging_response))
tru.reset_database()
tru_recorder_automerging = get_prebuilt_trulens_recorder(automerging_query_engine,
app_id="Automerging Query Engine")
for question in eval_questions:
with tru_recorder_automerging as recording:
response = automerging_query_engine.query(question)
print(question)
print(response)
tru.get_leaderboard(app_ids=[])
# launches on http://localhost:8501/
tru.run_dashboard()
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.llms.llama_utils.completion_to_prompt",
"llama_index.llms.Replicate"
] | [((1239, 1307), 'huggingface_hub.hf_hub_download', 'hf_hub_download', ([], {'repo_id': 'model_name_or_path', 'filename': 'model_basename'}), '(repo_id=model_name_or_path, filename=model_basename)\n', (1254, 1307), False, 'from huggingface_hub import hf_hub_download\n'), ((2628, 2799), 'llama_index.llms.Replicate', 'Replicate', ([], {'model': 'LLAMA_13B_V2_CHAT', 'temperature': '(0.01)', 'context_window': '(4096)', 'completion_to_prompt': 'custom_completion_to_prompt', 'messages_to_prompt': 'messages_to_prompt'}), '(model=LLAMA_13B_V2_CHAT, temperature=0.01, context_window=4096,\n completion_to_prompt=custom_completion_to_prompt, messages_to_prompt=\n messages_to_prompt)\n', (2637, 2799), False, 'from llama_index.llms import Replicate\n'), ((3062, 3148), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local:BAAI/bge-small-en-v1.5"""'}), "(llm=llm, embed_model=\n 'local:BAAI/bge-small-en-v1.5')\n", (3090, 3148), False, 'from llama_index import ServiceContext, set_global_service_context\n'), ((3158, 3234), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['[document]'], {'service_context': 'service_context'}), '([document], service_context=service_context)\n', (3189, 3234), False, 'from llama_index import VectorStoreIndex\n'), ((3909, 3914), 'trulens_eval.Tru', 'Tru', ([], {}), '()\n', (3912, 3914), False, 'from trulens_eval import Tru\n'), ((4002, 4075), 'utils.get_prebuilt_trulens_recorder', 'get_prebuilt_trulens_recorder', (['query_engine'], {'app_id': '"""Direct Query Engine"""'}), "(query_engine, app_id='Direct Query Engine')\n", (4031, 4075), False, 'from utils import get_prebuilt_trulens_recorder\n'), ((4503, 4621), 'utils.build_sentence_window_index', 'build_sentence_window_index', (['document', 'llm'], {'embed_model': '"""local:BAAI/bge-small-en-v1.5"""', 'save_dir': '"""sentence_index"""'}), "(document, llm, embed_model=\n 'local:BAAI/bge-small-en-v1.5', save_dir='sentence_index')\n", (4530, 4621), False, 'from utils import build_sentence_window_index\n'), ((4713, 4761), 'utils.get_sentence_window_query_engine', 'get_sentence_window_query_engine', (['sentence_index'], {}), '(sentence_index)\n', (4745, 4761), False, 'from utils import get_sentence_window_query_engine\n'), ((4951, 5048), 'utils.get_prebuilt_trulens_recorder', 'get_prebuilt_trulens_recorder', (['sentence_window_engine'], {'app_id': '"""Sentence Window Query Engine"""'}), "(sentence_window_engine, app_id=\n 'Sentence Window Query Engine')\n", (4980, 5048), False, 'from utils import get_prebuilt_trulens_recorder\n'), ((5440, 5554), 'utils.build_automerging_index', 'build_automerging_index', (['documents', 'llm'], {'embed_model': '"""local:BAAI/bge-small-en-v1.5"""', 'save_dir': '"""merging_index"""'}), "(documents, llm, embed_model=\n 'local:BAAI/bge-small-en-v1.5', save_dir='merging_index')\n", (5463, 5554), False, 'from utils import build_automerging_index\n'), ((5644, 5691), 'utils.get_automerging_query_engine', 'get_automerging_query_engine', (['automerging_index'], {}), '(automerging_index)\n', (5672, 5691), False, 'from utils import get_automerging_query_engine\n'), ((5891, 5986), 'utils.get_prebuilt_trulens_recorder', 'get_prebuilt_trulens_recorder', (['automerging_query_engine'], {'app_id': '"""Automerging Query Engine"""'}), "(automerging_query_engine, app_id=\n 'Automerging Query Engine')\n", (5920, 5986), False, 'from utils import get_prebuilt_trulens_recorder\n'), ((545, 569), 'os.path.exists', 
'os.path.exists', (['filename'], {}), '(filename)\n', (559, 569), False, 'import os\n'), ((641, 658), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (653, 658), False, 'import requests\n'), ((1386, 1473), 'llama_cpp.Llama', 'Llama', ([], {'model_path': 'model_path', 'n_threads': '(2)', 'n_batch': '(512)', 'n_gpu_layers': '(43)', 'n_ctx': '(4096)'}), '(model_path=model_path, n_threads=2, n_batch=512, n_gpu_layers=43,\n n_ctx=4096)\n', (1391, 1473), False, 'from llama_cpp import Llama\n'), ((1772, 1831), 'transformers.LlamaTokenizer.from_pretrained', 'LlamaTokenizer.from_pretrained', (['"""ChanceFocus/finma-7b-full"""'], {}), "('ChanceFocus/finma-7b-full')\n", (1802, 1831), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM\n'), ((1842, 1927), 'transformers.LlamaForCausalLM.from_pretrained', 'LlamaForCausalLM.from_pretrained', (['"""ChanceFocus/finma-7b-full"""'], {'device_map': '"""auto"""'}), "('ChanceFocus/finma-7b-full', device_map='auto'\n )\n", (1874, 1927), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM\n'), ((2382, 2567), 'llama_index.llms.llama_utils.completion_to_prompt', 'completion_to_prompt', (['completion'], {'system_prompt': '"""You are a Q&A assistant. Your goal is to answer questions as accurately as possible is the instructions and context provided."""'}), "(completion, system_prompt=\n 'You are a Q&A assistant. Your goal is to answer questions as accurately as possible is the instructions and context provided.'\n )\n", (2402, 2567), False, 'from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt\n'), ((752, 797), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[filename]'}), '(input_files=[filename])\n', (773, 797), False, 'from llama_index import SimpleDirectoryReader\n')] |
import logging
from dataclasses import dataclass
from typing import List, Optional
import llama_index
from llama_index.bridge.pydantic import BaseModel
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.utils import EmbedType, resolve_embed_model
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.llm_predictor import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.llms.llm import LLM
from llama_index.llms.utils import LLMType, resolve_llm
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser, TextSplitter
from llama_index.node_parser.text.sentence import (
DEFAULT_CHUNK_SIZE,
SENTENCE_CHUNK_OVERLAP,
SentenceSplitter,
)
from llama_index.prompts.base import BasePromptTemplate
from llama_index.schema import TransformComponent
from llama_index.types import PydanticProgramMode
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: int = DEFAULT_CHUNK_SIZE,
chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SentenceSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager or CallbackManager(),
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
class ServiceContextData(BaseModel):
llm: dict
llm_predictor: dict
prompt_helper: dict
embed_model: dict
transformations: List[dict]
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
transformations: List[TransformComponent]
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[EmbedType] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# pydantic program mode (used if output_cls is specified)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
system_prompt (Optional[str]): System-wide prompt to be prepended
to all input prompts, used to guide system "decision making"
query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
passed-in input queries.
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm=llm,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
text_splitter=text_splitter,
llama_logger=llama_logger,
callback_manager=callback_manager,
context_window=context_window,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
chunk_overlap=chunk_overlap,
num_output=num_output,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
transformations=transformations,
)
callback_manager = callback_manager or CallbackManager([])
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm.system_prompt = llm.system_prompt or system_prompt
llm.query_wrapper_prompt = llm.query_wrapper_prompt or query_wrapper_prompt
llm.pydantic_program_mode = (
llm.pydantic_program_mode or pydantic_program_mode
)
if llm_predictor is not None:
print("LLMPredictor is deprecated, please use LLM instead.")
llm_predictor = llm_predictor or LLMPredictor(
llm=llm, pydantic_program_mode=pydantic_program_mode
)
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# NOTE: embed model should be a transformation, but the way the service
# context works, we can't put in there yet.
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or [node_parser]
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[EmbedType] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size",
DeprecationWarning,
)
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# default to using the embed model passed from the service context
if embed_model == "default":
embed_model = service_context.embed_model
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or service_context.prompt_helper
if context_window is not None or num_output is not None:
prompt_helper = _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
transformations = transformations or []
node_parser_found = False
for transform in service_context.transformations:
if isinstance(transform, NodeParser):
node_parser_found = True
node_parser = transform
break
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
if not node_parser_found:
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or service_context.transformations
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
return self.llm_predictor.llm
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
for transform in self.transformations:
if isinstance(transform, NodeParser):
return transform
raise ValueError("No node parser found.")
def to_dict(self) -> dict:
"""Convert service context to dict."""
llm_dict = self.llm_predictor.llm.to_dict()
llm_predictor_dict = self.llm_predictor.to_dict()
embed_model_dict = self.embed_model.to_dict()
prompt_helper_dict = self.prompt_helper.to_dict()
tranform_list_dict = [x.to_dict() for x in self.transformations]
return ServiceContextData(
llm=llm_dict,
llm_predictor=llm_predictor_dict,
prompt_helper=prompt_helper_dict,
embed_model=embed_model_dict,
transformations=tranform_list_dict,
).dict()
@classmethod
def from_dict(cls, data: dict) -> "ServiceContext":
from llama_index.embeddings.loading import load_embed_model
from llama_index.extractors.loading import load_extractor
from llama_index.llm_predictor.loading import load_predictor
from llama_index.node_parser.loading import load_parser
service_context_data = ServiceContextData.parse_obj(data)
llm_predictor = load_predictor(service_context_data.llm_predictor)
embed_model = load_embed_model(service_context_data.embed_model)
prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)
transformations: List[TransformComponent] = []
for transform in service_context_data.transformations:
try:
transformations.append(load_parser(transform))
except ValueError:
transformations.append(load_extractor(transform))
return cls.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
transformations=transformations,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
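# Illustrative derivation (editor's addition, not part of the original module): reuse an
# existing context but override only the prompt-helper budget; everything else (LLM,
# embedding model, node parser, callbacks) is inherited. Assumes the default OpenAI-backed
# LLM and embedding model can resolve in this environment (e.g. OPENAI_API_KEY is set).
if __name__ == "__main__":
    base_ctx = ServiceContext.from_defaults(chunk_size=1024)
    tight_ctx = ServiceContext.from_service_context(
        base_ctx,
        context_window=2048,  # rebuilds the PromptHelper with a smaller context budget
        num_output=256,
    )
    print(tight_ctx.prompt_helper.context_window)  # 2048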
| [
"llama_index.llm_predictor.loading.load_predictor",
"llama_index.embeddings.loading.load_embed_model",
"llama_index.logger.LlamaLogger",
"llama_index.llms.utils.resolve_llm",
"llama_index.node_parser.loading.load_parser",
"llama_index.callbacks.base.CallbackManager",
"llama_index.indices.prompt_helper.PromptHelper.from_dict",
"llama_index.extractors.loading.load_extractor",
"llama_index.llm_predictor.LLMPredictor",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.embeddings.utils.resolve_embed_model"
] | [((1018, 1045), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1035, 1045), False, 'import logging\n'), ((1820, 1877), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1850, 1877), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((7504, 7536), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7523, 7536), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((11065, 11097), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (11084, 11097), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((14239, 14289), 'llama_index.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14253, 14289), False, 'from llama_index.llm_predictor.loading import load_predictor\n'), ((14313, 14363), 'llama_index.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14329, 14363), False, 'from llama_index.embeddings.loading import load_embed_model\n'), ((14389, 14447), 'llama_index.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (14411, 14447), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((6248, 6267), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6263, 6267), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((6435, 6451), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6446, 6451), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((6883, 6949), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (6895, 6949), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((8412, 8425), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8423, 8425), False, 'from llama_index.logger import LlamaLogger\n'), ((10360, 10376), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (10371, 10376), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((10405, 10426), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10417, 10426), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((1419, 1436), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1434, 1436), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((14623, 14645), 'llama_index.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (14634, 14645), False, 'from llama_index.node_parser.loading import load_parser\n'), ((14717, 14742), 'llama_index.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (14731, 14742), False, 'from llama_index.extractors.loading import load_extractor\n')] |
import logging
from dataclasses import dataclass
from typing import List, Optional
import llama_index
from llama_index.bridge.pydantic import BaseModel
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.utils import EmbedType, resolve_embed_model
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.llm_predictor import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.llms.base import LLM
from llama_index.llms.utils import LLMType, resolve_llm
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser, TextSplitter
from llama_index.node_parser.text.sentence import (
DEFAULT_CHUNK_SIZE,
SENTENCE_CHUNK_OVERLAP,
SentenceSplitter,
)
from llama_index.prompts.base import BasePromptTemplate
from llama_index.schema import TransformComponent
from llama_index.types import PydanticProgramMode
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: int = DEFAULT_CHUNK_SIZE,
chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SentenceSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager or CallbackManager(),
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
class ServiceContextData(BaseModel):
llm: dict
llm_predictor: dict
prompt_helper: dict
embed_model: dict
transformations: List[dict]
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
transformations: List[TransformComponent]
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[EmbedType] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# pydantic program mode (used if output_cls is specified)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
system_prompt (Optional[str]): System-wide prompt to be prepended
to all input prompts, used to guide system "decision making"
query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
passed-in input queries.
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm=llm,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
text_splitter=text_splitter,
llama_logger=llama_logger,
callback_manager=callback_manager,
context_window=context_window,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
chunk_overlap=chunk_overlap,
num_output=num_output,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
transformations=transformations,
)
callback_manager = callback_manager or CallbackManager([])
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = llm_predictor or LLMPredictor(
llm=llm, pydantic_program_mode=pydantic_program_mode
)
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# NOTE: embed model should be a transformation, but the way the service
# context works, we can't put in there yet.
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or [node_parser]
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[EmbedType] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
if chunk_size_limit is not None and chunk_size is None:
            logger.warning(
                "chunk_size_limit is deprecated, please specify chunk_size instead"
            )
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# default to using the embed model passed from the service context
if embed_model == "default":
embed_model = service_context.embed_model
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or service_context.prompt_helper
if context_window is not None or num_output is not None:
prompt_helper = _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
transformations = transformations or []
node_parser_found = False
for transform in service_context.transformations:
if isinstance(transform, NodeParser):
node_parser_found = True
node_parser = transform
break
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
if not node_parser_found:
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or service_context.transformations
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
if not isinstance(self.llm_predictor, LLMPredictor):
raise ValueError("llm_predictor must be an instance of LLMPredictor")
return self.llm_predictor.llm
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
for transform in self.transformations:
if isinstance(transform, NodeParser):
return transform
raise ValueError("No node parser found.")
def to_dict(self) -> dict:
"""Convert service context to dict."""
llm_dict = self.llm_predictor.llm.to_dict()
llm_predictor_dict = self.llm_predictor.to_dict()
embed_model_dict = self.embed_model.to_dict()
prompt_helper_dict = self.prompt_helper.to_dict()
tranform_list_dict = [x.to_dict() for x in self.transformations]
return ServiceContextData(
llm=llm_dict,
llm_predictor=llm_predictor_dict,
prompt_helper=prompt_helper_dict,
embed_model=embed_model_dict,
transformations=tranform_list_dict,
).dict()
@classmethod
def from_dict(cls, data: dict) -> "ServiceContext":
from llama_index.embeddings.loading import load_embed_model
from llama_index.extractors.loading import load_extractor
from llama_index.llm_predictor.loading import load_predictor
from llama_index.node_parser.loading import load_parser
service_context_data = ServiceContextData.parse_obj(data)
llm_predictor = load_predictor(service_context_data.llm_predictor)
embed_model = load_embed_model(service_context_data.embed_model)
prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)
transformations: List[TransformComponent] = []
for transform in service_context_data.transformations:
try:
transformations.append(load_parser(transform))
except ValueError:
transformations.append(load_extractor(transform))
return cls.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
transformations=transformations,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
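# Illustrative usage sketch (hypothetical settings; assumes the default LLM and
# embedding model can be resolved in the environment):
#
#   service_context = ServiceContext.from_defaults(chunk_size=512, chunk_overlap=64)
#   set_global_service_context(service_context)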
| [
"llama_index.llm_predictor.loading.load_predictor",
"llama_index.embeddings.loading.load_embed_model",
"llama_index.logger.LlamaLogger",
"llama_index.llms.utils.resolve_llm",
"llama_index.node_parser.loading.load_parser",
"llama_index.callbacks.base.CallbackManager",
"llama_index.indices.prompt_helper.PromptHelper.from_dict",
"llama_index.extractors.loading.load_extractor",
"llama_index.llm_predictor.LLMPredictor",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.embeddings.utils.resolve_embed_model"
] | [((1019, 1046), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1036, 1046), False, 'import logging\n'), ((1821, 1878), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1851, 1878), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((7115, 7147), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7134, 7147), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((10676, 10708), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (10695, 10708), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((13993, 14043), 'llama_index.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14007, 14043), False, 'from llama_index.llm_predictor.loading import load_predictor\n'), ((14067, 14117), 'llama_index.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14083, 14117), False, 'from llama_index.embeddings.loading import load_embed_model\n'), ((14143, 14201), 'llama_index.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (14165, 14201), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((6249, 6268), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6264, 6268), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((6436, 6452), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6447, 6452), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((6494, 6560), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (6506, 6560), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((8023, 8036), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8034, 8036), False, 'from llama_index.logger import LlamaLogger\n'), ((9971, 9987), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (9982, 9987), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((10016, 10037), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10028, 10037), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((1420, 1437), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1435, 1437), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((14377, 14399), 'llama_index.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (14388, 14399), False, 'from llama_index.node_parser.loading import load_parser\n'), ((14471, 14496), 'llama_index.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (14485, 14496), False, 'from llama_index.extractors.loading import load_extractor\n')] |
import qdrant_client
from llama_index import (
VectorStoreIndex,
ServiceContext,
)
from llama_index.llms import Ollama
from llama_index.vector_stores.qdrant import QdrantVectorStore
import llama_index
llama_index.set_global_handler("simple")
# re-initialize the vector store
client = qdrant_client.QdrantClient(
path="./qdrant_data"
)
vector_store = QdrantVectorStore(client=client, collection_name="tweets")
# get the LLM again
llm = Ollama(model="mixtral")
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
# load the index from the vector store
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)
query_engine = index.as_query_engine(similarity_top_k=20)
response = query_engine.query("Does the author like web frameworks? Give details.")
print(response)
| [
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.Ollama",
"llama_index.set_global_handler",
"llama_index.VectorStoreIndex.from_vector_store"
] | [((219, 259), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (249, 259), False, 'import llama_index\n'), ((306, 354), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'path': '"""./qdrant_data"""'}), "(path='./qdrant_data')\n", (332, 354), False, 'import qdrant_client\n'), ((379, 437), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'client', 'collection_name': '"""tweets"""'}), "(client=client, collection_name='tweets')\n", (396, 437), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((468, 491), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': '"""mixtral"""'}), "(model='mixtral')\n", (474, 491), False, 'from llama_index.llms import Ollama\n'), ((511, 569), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (539, 569), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((620, 718), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (654, 718), False, 'from llama_index import VectorStoreIndex, ServiceContext\n')] |
import tkinter as tk
from screeninfo import get_monitors
from PIL import Image, ImageTk
import os
from tkinter import filedialog
import TextConverter as tc
from tkinter import messagebox
import platform
import pyperclip
import config
from threading import Thread
from Speech_functions import checking, asking
import textwrap
import time
from llama_index import VectorStoreIndex, SimpleDirectoryReader, GPTVectorStoreIndex
from langchain.memory import ConversationSummaryMemory
import llama_index
import re
import openai
import json
from tkinter import ttk
config.init()
from langchain.chat_models import ChatOpenAI
import pickle
# import ctypes
# import objc
"""
Changes to make:
- icons for all buttons
- rounded corners
- smooth animation for expanding/compressing window
- Change all text in window based on language
- Document for other files
"""
def print_function_name(func):
def wrapper(*args, **kwargs):
print(f'Executing {func.__name__}')
return func(*args, **kwargs)
return wrapper
class Quiz:
"""Quiz object for iterate questions"""
#@print_function_name
def __init__(self, quiz_input_string, num_quiz_questions = 5):
self.questions = [None for _ in range(num_quiz_questions)]
lines = quiz_input_string.split("\n")
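        # Expected format: each question occupies 6 lines -- "N. <question>", four
        # "X. <alternative>" lines (the correct one marked with "*"), then a separator;
        # the leading "N. " prefix is stripped with [3:].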
for i in range(num_quiz_questions):
self.questions[i] = {
"question": lines[i * 6][3:],
"alternatives": ["", "", "", ""],
"answer": -1,
}
for j in range(4):
init_string = lines[i * 6 + j + 1][3:]
asterisk_index = init_string.find("*")
# Create the substring based on the asterisk index
if asterisk_index != -1:
init_string = init_string[:asterisk_index]
self.questions[i]["answer"] = j
self.questions[i]["alternatives"][j] = init_string
# self.questions is formatted like this: obj = [{question: "<q>", alternatives: ["alt1", "alt2", "alt3", "alt4"], answer: <0-3>}]
class Window(tk.Tk):
"""Main window"""
NUM_QUIZ_QUESTIONS = 5
JSON_NAME = 'EduBuddy_Memory.json'
PICKLE_NAME = 'EduBuddy_Memory.pkl'
#@print_function_name
def __init__(self, threads : list):
super().__init__()
self.end = False
self.configure(bg = "white")
self.threads = threads
self.context = ""
self.is_left = False
self.is_up = False
# Check windows
llm = ChatOpenAI(model_name = "gpt-4", temperature = 0.9)
if os.path.exists(Window.PICKLE_NAME):
with open(Window.PICKLE_NAME, 'rb') as f:
self.memory = pickle.load(f)
else:
self.memory = ConversationSummaryMemory(llm = llm)
# if os.path.exists(Window.JSON_NAME):
# with open(Window.JSON_NAME, 'r') as f:
# memory = json.load(f)
# self.memory.save_context({"input": f"Here is the context from old conversation {memory['history']}"}, {"output": "Okay, I will remember those!"})
self.subtractedDistace = 25
self.addedDistance = 25
if (platform.system()) == "Windows":
self.addedDistance = 80
self.save = ""
self.title("EduBuddy")
self.before_text = 0
# self.overrideredirect(True) # Remove window decorations (title, borders, exit & minimize buttons)
self.attributes("-topmost", True)
self.messagebox_opening = False
self.quiz_opening = False
# screen info
screen = get_monitors()[0] # number can be changed ig
self.screen_w = screen.width
self.screen_h = screen.height
self.is_maximized = False
# Set the window's initial position
self.padding_w = int(self.screen_w * 0.005)
self.padding_h = int(self.screen_w * 0.005)
self.sq_button_height = 45
self.language = tk.StringVar(self)
self.language.set("English")
# summarize, erase, show, save, quiz, close, microphone, file, text button and textbox
self.summarize_button = AButton(self, text = "Summarize", command = self.summarize_button_press)
self.erase_button = AButton(self, text = "Erase", command = self.erase_button_press)
self.show_button = AButton(self, text = "Show", command = self.show_button_press)
self.save_button = AButton(self, text = "Save", command = self.save_button_press)
self.quiz_button = AButton(self, text = "Quiz", command = self.quiz_button_press)
        self.language_button = tk.OptionMenu(self, self.language, "English", "Italian", "Afrikaans", "Spanish", "German", "French", "Indonesian", "Russian", "Polish", "Ukrainian", "Greek", "Latvian", "Mandarin", "Arabic", "Turkish", "Japanese", "Swahili", "Welsh", "Korean", "Icelandic", "Bengali", "Urdu", "Nepali", "Thai", "Punjabi", "Marathi", "Telugu")#AButton(self, text = "Language", command = self.language_button_press)
self.mic_button = AButton(self, text = "From Mic", command = asking)
self.file_button = AButton(self, text = "From File", command = self.file_button_press)
self.text_button = AButton(self, text = "From Text", command = self.text_button_press)
self.context_title = tk.Label(self, text = "Context", bg = "lightblue")
self.minimize_button = AButton(self, text = '-', command = self.minimize_button_press)
self.maximize_button = AButton(self, text = '+', command = self.maximize_button_press)
self.close_button = AButton(self, text = "x", command = self.close_button_press)
self.icon_size = 45
script_dir = os.path.dirname(os.path.abspath(__file__))
image_path = os.path.join(script_dir, "media", "buddy.png")
self.image = Image.open(image_path)
self.image = self.image.resize((self.icon_size, self.icon_size))
self.image = self.image.transpose(Image.FLIP_LEFT_RIGHT)
self.was_right = True
self.image_tk = ImageTk.PhotoImage(self.image)
self.img_label = tk.Label(self, image = self.image_tk)
# Text output
self.output_box = tk.Text(self, borderwidth = 0, highlightthickness = 0, font = ("Times New Roman", 14))
self.change_size(w = 400, h = 500)
self.output_box.configure(state = "normal")
# # Text input field
self.output_box.delete("1.0", tk.END)
# self.output_box.bind("<Return>", self.text_button_press)
# Bind mouse events
self.bind("<ButtonPress-1>", self.on_button_press)
self.bind("<B1-Motion>", self.on_button_motion)
self.bind("<ButtonRelease-1>", self.on_button_release)
# Quiz variables
self.current_quiz_ans = -1
self.current_quiz_score = 0
self.current_quiz_questions = []
self.quiz_obj = None
self.quiz_alternative_buttons = [None, None, None, None]
#@print_function_name
def maximize_button_press(self):
"""Maximize window"""
if not self.is_maximized:
self.is_maximized = True
self.info = (self.is_left, self.is_up, self.w ,self.h)
self.is_left = True
self.is_up = True
self.change_size(w = self.screen_w - 2 * self.padding_w, h = self.screen_h - 2 * self.padding_h- 25, changed = not self.info[0])
else:
self.is_maximized = False
(self.is_left, self.is_up, w ,h) = self.info
self.change_size(w = w, h = h, changed = not self.is_left)
#@print_function_name
def minimize_button_press(self):
"""Minimize window"""
self.messagebox_opening = True
messagebox.showwarning(title = "Minimize warning", message = "Be careful, there will be error if you are using Stage Manager on Mac")
self.messagebox_opening = False
self.overrideredirect(False)
self.wm_state('iconic')
#@print_function_name
def change_size(self, w = None, h = None, changed = None):
"""Change size of window, and position of elements if needed"""
if w is not None:
self.w = w # was 200
if h is not None:
self.h = h # was 300
# self.x = self.screen_w - self.w - self.padding_w # X coordinate
# self.y = self.screen_h - self.h - self.padding_h # Y coordinate
self.x = self.padding_w if self.is_left else self.screen_w - self.w - self.padding_w
self.y = self.padding_h + self.addedDistance if self.is_up else self.screen_h - self.h - self.padding_h - self.subtractedDistace #- self.addedDistance
if changed:
self.img_label.destroy()
self.image = self.image.transpose(Image.FLIP_LEFT_RIGHT)
self.image_tk = ImageTk.PhotoImage(self.image)
self.img_label = tk.Label(self, image = self.image_tk)
self.was_right = not self.was_right
self.geometry(f"+{self.x}+{self.y}")
if w is not None or h is not None:
self.geometry(f"{self.w}x{self.h}")
# summarize button
self.summarize_button.place(x = 0, y = 0, width = self.w / 5, height = self.sq_button_height)
# erase the screen
self.erase_button.place(x = self.w / 5, y = 0, width = self.w / 5, height = self.sq_button_height)
# show memory
self.show_button.place(x = self.w * 2 / 5, y = 0, width = self.w / 5, height = self.sq_button_height)
# save memory
self.save_button.place(x = self.w * 3 / 5, y = 0, width = self.w / 5, height = self.sq_button_height)
# quiz button
self.quiz_button.place(x = self.w * 4 / 5, y = 0, width = self.w / 5, height = self.sq_button_height)
# close button
# self.language_button.place(x = 0, y = self.h - 50, width = self.w / 5, height = self.sq_button_height)
# button get from microphone
self.mic_button.place(x = self.w / 5, y = self.h - 50, width = self.w / 5, height = self.sq_button_height)
# button get from local file
self.file_button.place(x = self.w * 2 / 5, y = self.h - 50, width = self.w / 5, height = self.sq_button_height)
# button get from text
self.text_button.place(x = self.w * 3 / 5, y = self.h - 50, width = self.w / 5, height = self.sq_button_height)
# button minimize
# self.maximize_button.place(x = -17.5 + (self.w - self.icon_size + self.w * 4 / 5) / 2, y = self.h - 50, width = 35, height = self.sq_button_height / 3)
# self.minimize_button.place(x = -17.5 + (self.w - self.icon_size + self.w * 4 / 5) / 2, y = self.h - 35, width = 35, height = self.sq_button_height / 3)
# self.close_button.place(x = -17.5 + (self.w - self.icon_size + self.w * 4 / 5) / 2, y = self.h - 20, width = 35, height = self.sq_button_height / 3)
# Context title box
self.context_title.place(x = 3, y = 45, w = self.w - 6, h = 25)
# self.img_label.place(x = self.w - self.icon_size, y = self.h - self.icon_size)
self.output_box.place(x = 3, y = 65, w = self.w - 6, h = (self.h - 2 * self.sq_button_height - 25), )
# self.output_box.config(highlightbackground = 'black', highlightthickness = 1)
if self.is_left:
self.img_label.place(x = 5, y = self.h - self.icon_size - 5)
self.language_button.place(x = self.w * 4 / 5, y = self.h - 50, width = self.w / 5, height = self.sq_button_height)
else:
self.img_label.place(x = self.w - self.icon_size - 5, y = self.h - self.icon_size - 5)
self.language_button.place(x = 0, y = self.h - 50, width = self.w / 5, height = self.sq_button_height)
#@print_function_name
def close_button_press(self):
"""Close window with message box"""
self.messagebox_opening = True
if messagebox.askokcancel("Quit", "Do you want to quit?"):
self.end = True
for t in self.threads:
t.join()
# with open(Window.JSON_NAME, 'w') as f:
# json.dump(self.memory.load_memory_variables({}), f)
with open(Window.PICKLE_NAME, 'wb') as f:
pickle.dump(self.memory, f)
self.destroy()
self.messagebox_opening = False
#@print_function_name
def file_button_press(self):
"""Open file(s) to query"""
self.context_title.config(text = "Read from file(s)")
self.output_box.configure(state = "disabled")
file_path = filedialog.askopenfilenames(
parent = self, title = "Choose one or multiple file(s)"
)
self.output_box.configure(state = "normal")
if len(file_path) != 0:
# Do something with the selected file path, such as printing it
documents = SimpleDirectoryReader(input_files = file_path).load_data()
# index = VectorStoreIndex.from_documents(documents)
index = GPTVectorStoreIndex.from_documents(documents)
# query index
query_engine = index.as_query_engine()
self.context_title.config(
text = "Enter your question about the file anywhere below"
)
            summary = query_engine.query("Summarize the key information in this/these file(s)!")
# print("\n", summary, end = "\n\n")
self.output_box.insert(tk.END, f"\n{summary.response}\n")
self.save += "(Summary from documents: " + str(summary.response) + "), "
# response = query_engine.query("Explain me The Schrodinger equation")
# result = query_engine.query("Why do we need quantum mechanics")
# answer = query_engine.query("Who is Julia Cook")
# random = query_engine.query("Who is Leo Messi")
# print("Count:", index., "\n\n\n\n")
# for doc_id in index.document_ids():
# embedding = index.embedding_for_document(doc_id)
# print(f"Embedding for document {doc_id}: {embedding}")
# print("\n", response, end = "\n\n")
# print("\n", result, end = "\n\n")
# print("\n", answer, end = "\n\n")
# print("\n", random, end = "\n\n")
# # print("Selected file:", file_path)
# print(len(documents))
#@print_function_name
def in_textbox(self, x, y, xx, yy):
"""Return true only if the position of mouse is in textbox"""
x1, y1, w, h = 0, 30, self.w - 6, (self.h - 2 * self.sq_button_height - 25)
x2, y2 = x1 + w, y1 + h
return x1 <= x <= x2 and y1 <= y <= y2
#@print_function_name
def on_button_press(self, event):
"""Track button press"""
if self.in_textbox(event.x, event.y, event.x_root, event.y_root):
if not self.messagebox_opening and not self.quiz_opening:
self.messagebox_opening = True
# print("before:", self.is_up)
self.change_size(w = 600, h = 750)
self.output_box.config(font = ("Times New Roman", 21))
# self.output_box.configure(state = 'disabled')
self.output_box.configure(state = 'normal')
# self.output_box.insert(tk.END, "HEY!\n")
else:
self.change_size(w = 400, h = 500)
self.output_box.config(font = ("Times New Roman", 14))
# Capture the initial mouse position and window position
self.x = event.x_root
self.y = event.y_root
self.offset_x = self.winfo_x()
self.offset_y = self.winfo_y()
self.messagebox_opening = False
self.output_box.configure(state = 'disabled')
# self.output_box.configure(state = 'normal')
# self.output_box.insert(tk.END, "HEY!\n")
#@print_function_name
def on_button_motion(self, event):
"""Move window with the mouse if it holds"""
if not self.messagebox_opening and not self.in_textbox(event.x, event.y, event.x_root, event.y_root):
# Calculate the new window position based on mouse movement
new_x = self.offset_x + (event.x_root - self.x)
new_y = self.offset_y + (event.y_root - self.y)
self.geometry(f"+{new_x}+{new_y}")
#@print_function_name
def on_button_release(self, event):
"""Stick to closest corner when release"""
if not self.messagebox_opening and not self.in_textbox(event.x, event.y, event.x_root, event.y_root):
changed = self.is_left != (event.x_root - event.x + self.w / 2 < self.screen_w / 2)
self.is_left = event.x_root - event.x < (self.screen_w - self.w) / 2
self.is_up = event.y_root - event.y < (self.screen_h - self.h) / 2
self.change_size(changed = changed)
#@print_function_name
def waitAndReturnNewText(self):
"""Running in background waiting for pasteboard"""
while not self.end:
try:
config.text = pyperclip.waitForNewPaste(timeout = 10)
except:
pass
#@print_function_name
def summarize_button_press(self):
"""Summarize text in pasteboard"""
# self.output_box.configure(state = "disabled")
# Destroy old canvas
try:
self.canvas.destroy()
except:
pass
        text = ' '.join(re.split(r"[ \t\n]+", config.text))
if text != "":
if len(text.split(" ")) >= 30:
# generate title
title = tc.getTitleFromText(text, self.language.get())
self.context_title.config(
text = textwrap.fill(title.split('"')[1], width = self.w - 20)
)
# generate summary
minimumWords = 0
maximumWords = tc.getResponseLengthFromText(text)
response = self.run_gpt(tc.generateSummaryFromText, (text, minimumWords, maximumWords, self.language.get()))
# thread = Thread(target = window.waitAndReturnNewText)
# thread.start()
# self.threads.append(thread)
# self.output_box.configure(state = "normal")
self.output_box.insert(tk.END, f"\nSummary:\n{response}\n")
self.before_text = len(self.output_box.get("1.0", tk.END))
self.save += "(Summary: " + response + "), "
else:
# self.output_box.configure(state = "normal")
self.output_box.insert(tk.END, "\nPlease choose a longer text to summarize\n")
else:
# self.output_box.configure(state = "normal")
self.output_box.insert(tk.END, "\nNo text found! Choose a new text if this keep happens\n")
# self.output_box.configure(state = 'normal')
# print(self.messagebox_opening)
#@print_function_name
def quiz_button_press(self):
"""Generate quizzes from pasteboard"""
# generate title
self.messagebox_opening = True
self.quiz_opening = True
print(self.output_box.get("1.0", tk.END))
# self.geometry("600x750")
if messagebox.askyesno("Quiz", "Are you sure you are ready for the quiz? Also, if you want to save this conversation, click cancel and click 'Save'"):
self.messagebox_opening = False
self.output_box.delete("1.0", tk.END)
self.output_box.configure(state = "disabled")
self.geometry("800x1200")
            text = ' '.join(re.split(r"[ \t\n]+", config.text))
print(len(text), text[:100], )
if text != "":
if len(text.split(" ")) >= 50:
title = tc.getTitleFromText(text, self.language.get())
self.context_title.config(
text = textwrap.fill(title.split('"')[1], width = self.w - 20)
)
# generate quiz
response = self.run_gpt(tc.getMultipleChoiceQuiz, (text, self.language.get(), 5))
self.quiz_obj = Quiz(response, Window.NUM_QUIZ_QUESTIONS)
self.quiz_iteration(self.quiz_obj)
else:
self.context_title.config(
text = "Please choose a longer text to make quiz"
)
else:
self.context_title.config(
text = "No text found! Choose a new text if this keep happens"
)
self.output_box.configure(state = "normal")
else:
self.messagebox_opening = False
self.quiz_opening = False
#@print_function_name
def show_button_press(self):
"""Show memory (saved and unsaved)"""
self.messagebox_opening = True
new_window = tk.Toplevel(self)
new_window.title("Memory")
t = tk.Text(new_window, borderwidth = 0, highlightthickness = 0)
t.pack()
t.insert(tk.END, f"Unsaved: {self.save}\nSaved: {self.memory.load_memory_variables({})['history']}")
t.configure(state = "disabled")
new_window.grab_set()
self.wait_window(new_window)
self.messagebox_opening = False
#@print_function_name
def text_button_press(self):
"""Answer to text inside textbox from user"""
        text = ' '.join(re.split(r"[ \t\n]+", self.output_box.get("1.0", "end-1c")[max(0, self.before_text-1):]))
if len(text) >= 2:
str1 = self.run_gpt(tc.sendGptRequest, (text, config.text, self.language.get(), self.memory))
try:
output ='\n'.join(str1.split('\n\n')[1:])
self.save += "(Q: " + text + " and A: " + str1 + "), "
if output == '':
raise ValueError
except:
output = str1
self.output_box.insert(tk.END, '\n\n' + output + '\n')
self.before_text = len(self.output_box.get("1.0", tk.END))
return 'break'
# Run your function here. And then with the gpt output, run insert it into output box
else:
self.context_title.config(
text = "Your text is too short to do any work!"
)
#@print_function_name
def quiz_iteration(self, quiz_obj):
"""Iterate through questions in quiz generated and put it nicely in canvas"""
if len(quiz_obj.questions) == 0:
self.canvas.destroy()
self.display_quiz_results()
return
# Destroy old canvas
try:
self.canvas.destroy()
except:
pass
# make quiz question and button element from Quiz obj
self.canvas = tk.Canvas(self, width = self.w, height = 300)
wrapped_text = textwrap.fill(
quiz_obj.questions[0]["question"], width = self.w - 20
)
self.question = self.canvas.create_text(
self.w // 2, 30, text = wrapped_text, width = self.w - 40
)
self.quiz_alternative_buttons = []
for i in range(4):
x1, y1, x2, y2 = 10, 65 + i * 45, self.w - 10, 110 + i * 45
rect = self.canvas.create_rectangle(x1, y1, x2, y2, fill = "white")
text = self.canvas.create_text(
(x1 + x2) // 2,
(y1 + y2) // 2,
text = textwrap.fill(
f"""{i+1}. {quiz_obj.questions[0]["alternatives"][i]}""",
width = self.w - 20,
),
width = self.w - 40,
)
self.canvas.tag_bind(
rect,
"<Button-1>",
lambda event, choice = i: self.quiz_choice(event, choice),
)
self.canvas.tag_bind(
text,
"<Button-1>",
lambda event, choice = i: self.quiz_choice(event, choice),
)
self.quiz_alternative_buttons.append((rect, text))
self.current_quiz_ans = quiz_obj.questions[0]["answer"]
self.current_quiz_questions.append([wrapped_text])
quiz_obj.questions.pop(0)
self.canvas.place(x = 0, y = (-100 + 45 * (i + 1)), w = self.w, h = 300)
#@print_function_name
def quiz_choice(self, event, choice):
"""Response to users' choices"""
if choice == self.current_quiz_ans:
self.current_quiz_score += 1
for rect, text in self.quiz_alternative_buttons:
self.canvas.itemconfig(rect, fill = "white")
self.canvas.itemconfig(self.quiz_alternative_buttons[choice][0], fill = "red")
self.canvas.itemconfig(
self.quiz_alternative_buttons[self.current_quiz_ans][0], fill = "green"
)
self.current_quiz_questions[-1].append(
self.canvas.itemcget(self.quiz_alternative_buttons[choice][1], "text")
.strip()
.split(maxsplit = 1)[1]
)
self.current_quiz_questions[-1].append(
self.canvas.itemcget(
self.quiz_alternative_buttons[self.current_quiz_ans][1], "text"
)
.strip()
.split(maxsplit = 1)[1]
)
self.after(ms = 2000, func = lambda: self.quiz_iteration(self.quiz_obj))
#@print_function_name
def display_quiz_results(self):
"""Display quiz results"""
output = (
f"Quiz results: {self.current_quiz_score}/{Window.NUM_QUIZ_QUESTIONS}:\n\n"
)
for id, vals in enumerate(self.current_quiz_questions):
try:
output += f"Question {id + 1}: {vals[0]}\nResult: {'Correct' if vals[1] == vals[2] else 'Incorrect'}!\nYour choice: {vals[1]}\nAnswer: {vals[2]}\n\n"
except:
pass
self.save += "(Quiz:" + ' '.join(re.split(" \t\n", str(self.current_quiz_questions))) + "), "
self.output_box.insert(tk.END, f"\n{output}")
self.before_text = len(self.output_box.get("1.0", tk.END))
self.quiz_opening = False
#@print_function_name
def save_button_press(self):
"""Save unsaved memory to saved memory to later save into file"""
self.output_box.delete("1.0", tk.END)
self.memory.save_context({"input": f"""Here is a context (remember topic and user's info) for future requests: {self.save}"""},
{"output": f"""Thank you, I will remember and be here for you!"""})
self.save = ""
#@print_function_name
def load_data(self, func, val, ret):
"""Run function and value set from run_gpt function"""
ret[0] = func(*val)
#@print_function_name
def run_gpt(self, func, val):
"""Run complicated functions in another thread"""
ret = [" "]
loading_window = LoadingWindow(self, ret)
thread = Thread(target = self.load_data, args = (func, val, ret))
thread.start()
loading_window.grab_set()
self.wait_window(loading_window)
return ret[0]
#@print_function_name
def erase_button_press(self):
"""Erase all memory (saved and unsaved and in file)"""
llm = ChatOpenAI(model_name = "gpt-4", temperature = 0.9)
self.memory = ConversationSummaryMemory(llm = llm)
with open(Window.PICKLE_NAME, 'wb') as f:
pickle.dump(self.memory, f)
self.save = ""
class LoadingWindow(tk.Toplevel):
"""Loading window to let user know the system is running"""
#@print_function_name
def __init__(self, master, ret):
super().__init__(master)
self.ret = ret
self.title("Loading")
self.string = tk.StringVar(self, "Working on it")
label = tk.Label(self, textvariable = self.string)
label.pack()
self.progress = ttk.Progressbar(self, orient = tk.HORIZONTAL, length = 200, mode = 'determinate')
self.progress.pack()
self.percent = tk.Label(self, text = "0%")
self.percent.pack()
# self.update_progress()
t = Thread(target = self.update_progress)
t.start()
#@print_function_name
def update_progress(self):
"""Update the progress bar and text"""
i = 0
while self.ret == [" "]:
if i != 99:
                if not (i + 1) % 33:
self.string.set(self.string.get() + '.')
self.progress['value'] = i+1
self.percent['text'] = f"{i+1}%"
self.update_idletasks()
time.sleep(0.1)
i += 1
else:
continue
self.progress['value'] = 100
self.percent['text'] = f"100%"
time.sleep(2)
self.destroy()
class AButton(tk.Button):
"""A class inherit from tk.Button to change color when move mouse to its region"""
#@print_function_name
def __init__(self, master, **kw):
self.master = master
tk.Button.__init__(self, master = master, highlightbackground = "white", **kw)
self.bind('<Enter>', self.on_enter)
self.bind('<Leave>', self.on_leave)
#@print_function_name
def on_enter(self, e):
"""Change color to darkgray when the mouse move to its region"""
# print('a')
if not self.master.messagebox_opening:
self.config(fg = "darkgray", highlightbackground = "darkgray")
#@print_function_name
def on_leave(self, e = None):
"""Change color back to default when the mouse leave"""
# if not self.messagebox_opening:
self.config(fg = "black", highlightbackground = "white")
if __name__ == "__main__":
threads = []
window = Window(threads)
threads = threads
thread = Thread(target = window.waitAndReturnNewText)
thread.start()
threads.append(thread)
window.mainloop()
| [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader"
] | [((556, 569), 'config.init', 'config.init', ([], {}), '()\n', (567, 569), False, 'import config\n'), ((29784, 29826), 'threading.Thread', 'Thread', ([], {'target': 'window.waitAndReturnNewText'}), '(target=window.waitAndReturnNewText)\n', (29790, 29826), False, 'from threading import Thread\n'), ((2528, 2575), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'temperature': '(0.9)'}), "(model_name='gpt-4', temperature=0.9)\n", (2538, 2575), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2591, 2625), 'os.path.exists', 'os.path.exists', (['Window.PICKLE_NAME'], {}), '(Window.PICKLE_NAME)\n', (2605, 2625), False, 'import os\n'), ((3979, 3997), 'tkinter.StringVar', 'tk.StringVar', (['self'], {}), '(self)\n', (3991, 3997), True, 'import tkinter as tk\n'), ((4629, 4969), 'tkinter.OptionMenu', 'tk.OptionMenu', (['self', 'self.language', '"""English"""', '"""Italian"""', '"""Afrikaans"""', '"""Spanish"""', '"""German"""', '"""French"""', '"""Indonesian"""', '"""Russian"""', '"""Polish"""', '"""Ukranian"""', '"""Greek"""', '"""Latvian"""', '"""Mandarin"""', '"""Arabic"""', '"""Turkish"""', '"""Japanese"""', '"""Swahili"""', '"""Welsh"""', '"""Korean"""', '"""Icelandic"""', '"""Bengali"""', '"""Urdu"""', '"""Nepali"""', '"""Thai"""', '"""Punjabi"""', '"""Marathi"""', '"""Telugu"""'], {}), "(self, self.language, 'English', 'Italian', 'Afrikaans',\n 'Spanish', 'German', 'French', 'Indonesian', 'Russian', 'Polish',\n 'Ukranian', 'Greek', 'Latvian', 'Mandarin', 'Arabic', 'Turkish',\n 'Japanese', 'Swahili', 'Welsh', 'Korean', 'Icelandic', 'Bengali',\n 'Urdu', 'Nepali', 'Thai', 'Punjabi', 'Marathi', 'Telugu')\n", (4642, 4969), True, 'import tkinter as tk\n'), ((5321, 5367), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Context"""', 'bg': '"""lightblue"""'}), "(self, text='Context', bg='lightblue')\n", (5329, 5367), True, 'import tkinter as tk\n'), ((5773, 5819), 'os.path.join', 'os.path.join', (['script_dir', '"""media"""', '"""buddy.png"""'], {}), "(script_dir, 'media', 'buddy.png')\n", (5785, 5819), False, 'import os\n'), ((5841, 5863), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (5851, 5863), False, 'from PIL import Image, ImageTk\n'), ((6056, 6086), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['self.image'], {}), '(self.image)\n', (6074, 6086), False, 'from PIL import Image, ImageTk\n'), ((6112, 6147), 'tkinter.Label', 'tk.Label', (['self'], {'image': 'self.image_tk'}), '(self, image=self.image_tk)\n', (6120, 6147), True, 'import tkinter as tk\n'), ((6199, 6284), 'tkinter.Text', 'tk.Text', (['self'], {'borderwidth': '(0)', 'highlightthickness': '(0)', 'font': "('Times New Roman', 14)"}), "(self, borderwidth=0, highlightthickness=0, font=('Times New Roman', 14)\n )\n", (6206, 6284), True, 'import tkinter as tk\n'), ((7726, 7860), 'tkinter.messagebox.showwarning', 'messagebox.showwarning', ([], {'title': '"""Minimize warning"""', 'message': '"""Be careful, there will be error if you are using Stage Manager on Mac"""'}), "(title='Minimize warning', message=\n 'Be careful, there will be error if you are using Stage Manager on Mac')\n", (7748, 7860), False, 'from tkinter import messagebox\n'), ((11873, 11927), 'tkinter.messagebox.askokcancel', 'messagebox.askokcancel', (['"""Quit"""', '"""Do you want to quit?"""'], {}), "('Quit', 'Do you want to quit?')\n", (11895, 11927), False, 'from tkinter import messagebox\n'), ((12537, 12622), 'tkinter.filedialog.askopenfilenames', 'filedialog.askopenfilenames', ([], 
{'parent': 'self', 'title': '"""Choose one or multiple file(s)"""'}), "(parent=self, title='Choose one or multiple file(s)'\n )\n", (12564, 12622), False, 'from tkinter import filedialog\n'), ((19234, 19389), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""Quiz"""', '"""Are you sure you are ready for the quiz? Also, if you want to save this conversation, click cancel and click \'Save\'"""'], {}), '(\'Quiz\',\n "Are you sure you are ready for the quiz? Also, if you want to save this conversation, click cancel and click \'Save\'"\n )\n', (19253, 19389), False, 'from tkinter import messagebox\n'), ((20896, 20913), 'tkinter.Toplevel', 'tk.Toplevel', (['self'], {}), '(self)\n', (20907, 20913), True, 'import tkinter as tk\n'), ((20961, 21017), 'tkinter.Text', 'tk.Text', (['new_window'], {'borderwidth': '(0)', 'highlightthickness': '(0)'}), '(new_window, borderwidth=0, highlightthickness=0)\n', (20968, 21017), True, 'import tkinter as tk\n'), ((22807, 22848), 'tkinter.Canvas', 'tk.Canvas', (['self'], {'width': 'self.w', 'height': '(300)'}), '(self, width=self.w, height=300)\n', (22816, 22848), True, 'import tkinter as tk\n'), ((22876, 22943), 'textwrap.fill', 'textwrap.fill', (["quiz_obj.questions[0]['question']"], {'width': '(self.w - 20)'}), "(quiz_obj.questions[0]['question'], width=self.w - 20)\n", (22889, 22943), False, 'import textwrap\n'), ((26923, 26975), 'threading.Thread', 'Thread', ([], {'target': 'self.load_data', 'args': '(func, val, ret)'}), '(target=self.load_data, args=(func, val, ret))\n', (26929, 26975), False, 'from threading import Thread\n'), ((27242, 27289), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'temperature': '(0.9)'}), "(model_name='gpt-4', temperature=0.9)\n", (27252, 27289), False, 'from langchain.chat_models import ChatOpenAI\n'), ((27316, 27350), 'langchain.memory.ConversationSummaryMemory', 'ConversationSummaryMemory', ([], {'llm': 'llm'}), '(llm=llm)\n', (27341, 27350), False, 'from langchain.memory import ConversationSummaryMemory\n'), ((27736, 27771), 'tkinter.StringVar', 'tk.StringVar', (['self', '"""Working on it"""'], {}), "(self, 'Working on it')\n", (27748, 27771), True, 'import tkinter as tk\n'), ((27788, 27828), 'tkinter.Label', 'tk.Label', (['self'], {'textvariable': 'self.string'}), '(self, textvariable=self.string)\n', (27796, 27828), True, 'import tkinter as tk\n'), ((27876, 27951), 'tkinter.ttk.Progressbar', 'ttk.Progressbar', (['self'], {'orient': 'tk.HORIZONTAL', 'length': '(200)', 'mode': '"""determinate"""'}), "(self, orient=tk.HORIZONTAL, length=200, mode='determinate')\n", (27891, 27951), False, 'from tkinter import ttk\n'), ((28010, 28035), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""0%"""'}), "(self, text='0%')\n", (28018, 28035), True, 'import tkinter as tk\n'), ((28111, 28146), 'threading.Thread', 'Thread', ([], {'target': 'self.update_progress'}), '(target=self.update_progress)\n', (28117, 28146), False, 'from threading import Thread\n'), ((28755, 28768), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (28765, 28768), False, 'import time\n'), ((29007, 29081), 'tkinter.Button.__init__', 'tk.Button.__init__', (['self'], {'master': 'master', 'highlightbackground': '"""white"""'}), "(self, master=master, highlightbackground='white', **kw)\n", (29025, 29081), True, 'import tkinter as tk\n'), ((2766, 2800), 'langchain.memory.ConversationSummaryMemory', 'ConversationSummaryMemory', ([], {'llm': 'llm'}), '(llm=llm)\n', (2791, 2800), False, 'from langchain.memory import 
ConversationSummaryMemory\n'), ((3187, 3204), 'platform.system', 'platform.system', ([], {}), '()\n', (3202, 3204), False, 'import platform\n'), ((3616, 3630), 'screeninfo.get_monitors', 'get_monitors', ([], {}), '()\n', (3628, 3630), False, 'from screeninfo import get_monitors\n'), ((5725, 5750), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (5740, 5750), False, 'import os\n'), ((8809, 8839), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['self.image'], {}), '(self.image)\n', (8827, 8839), False, 'from PIL import Image, ImageTk\n'), ((8869, 8904), 'tkinter.Label', 'tk.Label', (['self'], {'image': 'self.image_tk'}), '(self, image=self.image_tk)\n', (8877, 8904), True, 'import tkinter as tk\n'), ((12972, 13017), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (13006, 13017), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, GPTVectorStoreIndex\n'), ((17458, 17488), 're.split', 're.split', (['""" \t\n"""', 'config.text'], {}), "(' \\t\\n', config.text)\n", (17466, 17488), False, 'import re\n'), ((27415, 27442), 'pickle.dump', 'pickle.dump', (['self.memory', 'f'], {}), '(self.memory, f)\n', (27426, 27442), False, 'import pickle\n'), ((2711, 2725), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2722, 2725), False, 'import pickle\n'), ((12210, 12237), 'pickle.dump', 'pickle.dump', (['self.memory', 'f'], {}), '(self.memory, f)\n', (12221, 12237), False, 'import pickle\n'), ((17076, 17113), 'pyperclip.waitForNewPaste', 'pyperclip.waitForNewPaste', ([], {'timeout': '(10)'}), '(timeout=10)\n', (17101, 17113), False, 'import pyperclip\n'), ((17903, 17937), 'TextConverter.getResponseLengthFromText', 'tc.getResponseLengthFromText', (['text'], {}), '(text)\n', (17931, 17937), True, 'import TextConverter as tc\n'), ((19600, 19630), 're.split', 're.split', (['""" \t\n"""', 'config.text'], {}), "(' \\t\\n', config.text)\n", (19608, 19630), False, 'import re\n'), ((28589, 28604), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (28599, 28604), False, 'import time\n'), ((12828, 12872), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'file_path'}), '(input_files=file_path)\n', (12849, 12872), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, GPTVectorStoreIndex\n'), ((23450, 23543), 'textwrap.fill', 'textwrap.fill', (['f"""{i + 1}. {quiz_obj.questions[0][\'alternatives\'][i]}"""'], {'width': '(self.w - 20)'}), '(f"{i + 1}. {quiz_obj.questions[0][\'alternatives\'][i]}", width\n =self.w - 20)\n', (23463, 23543), False, 'import textwrap\n')] |
import os
from llama_index import StringIterableReader, GPTTreeIndex
import llama_index
openai_api_key = os.environ.get('OPENAI_API_KEY')
# input_question = "How tall is Tom Hiddleston"
input_question = "Who is taller Tom Hiddleston or Chris Hemsworth"
input_question_list = []
input_question_list.append(input_question)
documents = StringIterableReader().load_data(texts=input_question_list)
index = GPTTreeIndex.from_documents(documents)
response = index.query("Is this text comparing two or more things? Give a True or False answer")
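# index.query() returns a Response object rather than a plain string, so literal
# "True"/"False" answers are queried below to obtain Response objects for comparison.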
get_comparer_variable_for_true = index.query("return the word True as an answer to this query")
# print(f"I got {get_comparer_variable_for_true} as answer of type {type(get_comparer_variable_for_true)}")
get_comparer_variable_for_false = index.query("return the word False as an answer to this query")
print(f" Response is : [{response}]")
if response == get_comparer_variable_for_true:
print("Response is True")
else :
print("Response is not True")
if response == get_comparer_variable_for_false:
print("Response is False")
else :
print("Response is not False")
# print(f"Is response instance of type lama_index.response.schema.Response : {isinstance(response, llama_index.response.schema.Response)}")
# my_object = "True"
# my_object_as_Response = llama_index.response.schema.Response(my_object)
# print(f"Typecast {my_object} : {type(my_object_as_Response)}")
# print(f"Is my_object_as_Response instance of type lama_index.response.schema.Response : {isinstance(my_object_as_Response, llama_index.response.schema.Response)}")
# print(f"ID of my_object_as_Response : {my_object_as_Response} - {id(my_object_as_Response)}")
# print(f"ID of response : {response} - {id(response)}")
# print(f"ID of my_object_as_Response : {my_object_as_Response} - {id(my_object_as_Response)}")
# print(f"Is my_object_as_Response equal to response ? {my_object_as_Response == response}")
# print(f"Is my_object_as_Response same as response ? {my_object_as_Response is response}")
| [
"llama_index.StringIterableReader",
"llama_index.GPTTreeIndex.from_documents"
] | [((108, 140), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (122, 140), False, 'import os\n'), ((407, 445), 'llama_index.GPTTreeIndex.from_documents', 'GPTTreeIndex.from_documents', (['documents'], {}), '(documents)\n', (434, 445), False, 'from llama_index import StringIterableReader, GPTTreeIndex\n'), ((339, 361), 'llama_index.StringIterableReader', 'StringIterableReader', ([], {}), '()\n', (359, 361), False, 'from llama_index import StringIterableReader, GPTTreeIndex\n')] |
# Imports
from collections import defaultdict
from time import sleep
from llama_index import (
StorageContext,
load_index_from_storage,
set_global_service_context,
)
from model_context import get_anyscale_context
from templates import custom_template, yn_template
import csv
from tqdm import tqdm
from openai import OpenAI
client = OpenAI(base_url="https://api.endpoints.anyscale.com/v1", api_key="KEY")
# DEBUG LOGS
# import llama_index
# llama_index.set_global_handler("simple")
rag = True
yn = True
if rag:
# Select Model
print("Loading model context...")
service_context = get_anyscale_context()
set_global_service_context(service_context)
# Load embedded data for RAG
print("Loading RAG embeddings...")
storage_context = StorageContext.from_defaults(persist_dir="vector-db-all")
index = load_index_from_storage(
service_context=service_context, storage_context=storage_context
)
# Assemble Query Engine
top_k = 5
if yn:
query_engine = index.as_query_engine(
text_qa_template=yn_template,
similarity_top_k=top_k,
# verbose=True,
# streaming=True,
)
else:
query_engine = index.as_query_engine(
text_qa_template=custom_template,
similarity_top_k=top_k,
# verbose=True,
# streaming=True,
)
def query_baseline(text: str, yn: bool) -> str:
while True:
if yn:
content_msg = "Answer with yes/no and an explanation."
else:
content_msg = "Express whether the statement is true or false and explain why." #Your job is to
try:
chat_completion = client.chat.completions.create(
model="meta-llama/Llama-2-7b-chat-hf",
messages=[
{
"role": "system",
"content": content_msg,
},
{
"role": "user",
"content": text,
},
],
temperature=0,
)
return chat_completion.choices[0].message.content.strip()
except:
print("BROKE: ", text)
sleep(10)
# Load evaluation data
print("Loading evaluation data...")
labeled_data = defaultdict(list)
with open("../all-exemplars-pruned/positive.csv", "r") as full_data:
data_reader = csv.DictReader(full_data)
for sample in data_reader:
labeled_data[sample["generic"]].append(sample["exemplar"])
print(f"{len(labeled_data)} generics loaded!")
generics = list(labeled_data.keys())
# Evaluation Loop
print("Beginning evaluation:")
tie = 0
loss = 0
win = 0
with open(f"some_answers_{'rag' if rag else 'base'}_{'yn' if yn else 'tf'}.txt", 'w') as ans_file:
for i in tqdm(range(1000), desc="Generic evaluation process"):
sample = generics[i].lower()
for ext in ["Some", "It is never the case that"]:
prompt = ext.lower() + " " + sample.lower()
if yn:
if ext == "It is never the case that":
prompt = "Is it never the case that " + sample[:-1].lower() + "?"
else:
prompt = "Do " + prompt[:-1] + "?"
if rag:
response = query_engine.query(prompt)
else:
response = query_baseline(prompt, yn)
# Record answer
ans_file.write("INDEX: " + str(i) + '\n')
ans_file.write("BASE INPUT: " + prompt + '\n')
ans_file.write("RESPONSE: " + '\n' + str(response) + '\n\n')
if yn:
process = str(response).lower()
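                # Heuristic answer parsing: count "no" while subtracting longer words that
                # contain "no" (not/now/nor/...); likewise count "yes" excluding "eyes".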
false_count = process.count("no") - process.count("not") - process.count("now") - process.count("noc") - process.count("nor") - process.count("non") - process.count("nou")
true_count = str(response).lower().count("yes") - str(response).lower().count("eyes")
else:
false_count = str(response).lower().count("false")
true_count = str(response).lower().count("true")
# print(false_count)
# print(true_count)
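            # Scoring: the positive form ("Some ...") should be affirmed, while the
            # negated form ("It is never the case that ...") should be denied.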
if ext == "Some":
good = true_count
bad = false_count
elif ext == "It is never the case that":
good = false_count
bad = true_count
ans_file.write("RESULT: ")
if good > bad:
win += 1
ans_file.write("WIN")
elif bad > good:
loss += 1
ans_file.write("LOSS")
else:
tie += 1
ans_file.write("TIE")
ans_file.write('\n\n-------------------\n\n')
print("Wins: ", win)
print("Ties: ", tie)
print("Loss: ", loss)
| [
"llama_index.set_global_service_context",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage"
] | [((345, 416), 'openai.OpenAI', 'OpenAI', ([], {'base_url': '"""https://api.endpoints.anyscale.com/v1"""', 'api_key': '"""KEY"""'}), "(base_url='https://api.endpoints.anyscale.com/v1', api_key='KEY')\n", (351, 416), False, 'from openai import OpenAI\n'), ((2365, 2382), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2376, 2382), False, 'from collections import defaultdict\n'), ((605, 627), 'model_context.get_anyscale_context', 'get_anyscale_context', ([], {}), '()\n', (625, 627), False, 'from model_context import get_anyscale_context\n'), ((632, 675), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (658, 675), False, 'from llama_index import StorageContext, load_index_from_storage, set_global_service_context\n'), ((771, 828), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""vector-db-all"""'}), "(persist_dir='vector-db-all')\n", (799, 828), False, 'from llama_index import StorageContext, load_index_from_storage, set_global_service_context\n'), ((841, 935), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'service_context': 'service_context', 'storage_context': 'storage_context'}), '(service_context=service_context, storage_context=\n storage_context)\n', (864, 935), False, 'from llama_index import StorageContext, load_index_from_storage, set_global_service_context\n'), ((2470, 2495), 'csv.DictReader', 'csv.DictReader', (['full_data'], {}), '(full_data)\n', (2484, 2495), False, 'import csv\n'), ((2280, 2289), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (2285, 2289), False, 'from time import sleep\n')] |
#!/usr/bin/env python3
# Copyright (c) 2023-2024 Steve Castellotti
# This file is part of Urcuchillay and is released under the MIT License.
# See LICENSE file in the project root for full license information.
import logging
import os
import sys
import config
try:
import chromadb
import llama_index
import llama_index.vector_stores
import transformers
import utils
except ModuleNotFoundError as e:
print('\nError importing Python module(s)')
print('If installed using setup.sh it may be necessary to run:\n')
print('pyenv activate urcuchillay-env\n')
sys.exit(1)
class Client:
def __init__(self, args):
self.debug = args.debug
llama_debug = llama_index.callbacks.LlamaDebugHandler(print_trace_on_end=self.debug)
self.callback_manager = llama_index.callbacks.CallbackManager([llama_debug])
# Fallback settings for api_base, api_key, and api_version
os.environ['OPENAI_API_BASE'] = config.APIConfig.get_openai_api_base(host=args.api_host, port=args.api_port)
os.environ['OPENAI_API_KEY'] = config.APIConfig.OPENAI_API_KEY
os.environ['OPENAI_API_VERSION'] = config.APIConfig.OPENAI_API_VERSION
# Set Parallel Iterator
os.environ['TOKENIZERS_PARALLELISM'] = 'true' if config.Config.TOKENIZERS_PARALLELISM else 'false'
# ChromaDB Settings
self.db = None
self.chromadb_settings = chromadb.config.Settings(
anonymized_telemetry=config.Config.ANONYMIZED_TELEMETRY,
allow_reset=config.Config.ALLOW_RESET,
)
self.llm = None
self.service_context = None
self.index = None
def get_llm(self, args):
return llama_index.llms.OpenAI(
model='text-davinci-003',
temperature=args.temperature,
max_tokens=args.context,
api_base=config.APIConfig.get_openai_api_base(host=args.api_host, port=args.api_port),
api_key=config.APIConfig.OPENAI_API_KEY,
api_version=config.APIConfig.OPENAI_API_VERSION,
max_retries=args.max_retries,
timeout=args.timeout,
callback_manager=self.callback_manager,
)
def get_service_context(self, llm, args):
embed_model = config.Config.EMBED_MODEL_NAME
if hasattr(args, 'embed_model_name'):
if args.embed_model_name == 'default' or args.embed_model_name == 'local':
embed_model = args.embed_model_name
else:
if hasattr(args, 'embed_model_provider'):
# use Huggingface embeddings
embed_model = llama_index.embeddings.HuggingFaceEmbedding(
model_name=args.embed_model_provider + '/' + args.embed_model_name)
return llama_index.ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
callback_manager=self.callback_manager,
context_window=args.context,
num_output=args.max_new_tokens,
)
def get_index(self, service_context, args, storage_type=config.Config.STORAGE_TYPE):
        if storage_type == 'json':
            return self.get_index_json(service_context, args)
elif storage_type == 'chromadb':
return self.get_index_chroma(service_context, args)
else:
return None
def save_index(self, args, storage_type=config.Config.STORAGE_TYPE):
if storage_type == 'json' and self.index:
self.index.storage_context.persist(persist_dir=args.storage)
elif storage_type == 'chromadb':
# For ChromaDB, storage is already written to disk
# as part of the loading data process
pass
def reset_index(self, args):
logging.warning('resetting index')
if config.Config.STORAGE_TYPE == 'json':
utils.storage_reset(storage_path=args.storage)
elif config.Config.STORAGE_TYPE == 'chromadb':
if not self.db:
self.db = chromadb.PersistentClient(
settings=self.chromadb_settings,
path=args.storage
)
self.db.reset()
self.reset_chroma_collection()
self.service_context = self.get_service_context(self.llm, args)
self.index = self.get_index(self.service_context, args)
@staticmethod
def get_index_json(service_context, args):
if args.load and all(os.path.exists(os.path.join(args.storage, filename))
for filename in config.Config.STORAGE_FILES):
# load vector index from storage
storage_context = llama_index.StorageContext.from_defaults(persist_dir=args.storage)
return llama_index.load_index_from_storage(storage_context, service_context=service_context)
else:
if not os.path.exists(args.data) or not os.listdir(args.data):
# Create a temporary empty file for the index if a missing or empty data directory was supplied
temp_file = utils.create_temporary_empty_file()
documents = llama_index.SimpleDirectoryReader(input_files=[temp_file]).load_data()
index = llama_index.VectorStoreIndex.from_documents(documents, service_context=service_context)
os.remove(temp_file)
return index
else:
documents = llama_index.SimpleDirectoryReader(args.data).load_data()
return llama_index.VectorStoreIndex.from_documents(
documents, service_context=service_context
)
def get_index_chroma(self, service_context, args):
if not self.db:
self.db = chromadb.PersistentClient(
settings=self.chromadb_settings,
path=args.storage
)
chroma_collection = self.db.get_or_create_collection('quickstart')
# set up ChromaVectorStore and load in data
vector_store = llama_index.vector_stores.ChromaVectorStore(chroma_collection=chroma_collection)
if args.load:
# noinspection PyTypeChecker
index = llama_index.VectorStoreIndex.from_vector_store(
vector_store,
service_context=service_context,
)
else:
storage_context = llama_index.storage.storage_context.StorageContext.from_defaults(
vector_store=vector_store)
documents = llama_index.SimpleDirectoryReader(args.data).load_data()
index = llama_index.VectorStoreIndex.from_documents(
documents, storage_context=storage_context, service_context=service_context
)
return index
@staticmethod
def reset_chroma_collection():
chroma_client = chromadb.EphemeralClient()
for collection in chroma_client.list_collections():
chroma_client.delete_collection(name=collection.name)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.StorageContext.from_defaults",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.embeddings.HuggingFaceEmbedding",
"llama_index.callbacks.CallbackManager",
"llama_index.load_index_from_storage"
] | [((590, 601), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (598, 601), False, 'import sys\n'), ((704, 774), 'llama_index.callbacks.LlamaDebugHandler', 'llama_index.callbacks.LlamaDebugHandler', ([], {'print_trace_on_end': 'self.debug'}), '(print_trace_on_end=self.debug)\n', (743, 774), False, 'import llama_index\n'), ((807, 859), 'llama_index.callbacks.CallbackManager', 'llama_index.callbacks.CallbackManager', (['[llama_debug]'], {}), '([llama_debug])\n', (844, 859), False, 'import llama_index\n'), ((968, 1044), 'config.APIConfig.get_openai_api_base', 'config.APIConfig.get_openai_api_base', ([], {'host': 'args.api_host', 'port': 'args.api_port'}), '(host=args.api_host, port=args.api_port)\n', (1004, 1044), False, 'import config\n'), ((1420, 1545), 'chromadb.config.Settings', 'chromadb.config.Settings', ([], {'anonymized_telemetry': 'config.Config.ANONYMIZED_TELEMETRY', 'allow_reset': 'config.Config.ALLOW_RESET'}), '(anonymized_telemetry=config.Config.\n ANONYMIZED_TELEMETRY, allow_reset=config.Config.ALLOW_RESET)\n', (1444, 1545), False, 'import chromadb\n'), ((2798, 2981), 'llama_index.ServiceContext.from_defaults', 'llama_index.ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'callback_manager': 'self.callback_manager', 'context_window': 'args.context', 'num_output': 'args.max_new_tokens'}), '(llm=llm, embed_model=embed_model,\n callback_manager=self.callback_manager, context_window=args.context,\n num_output=args.max_new_tokens)\n', (2838, 2981), False, 'import llama_index\n'), ((3795, 3829), 'logging.warning', 'logging.warning', (['"""resetting index"""'], {}), "('resetting index')\n", (3810, 3829), False, 'import logging\n'), ((6041, 6126), 'llama_index.vector_stores.ChromaVectorStore', 'llama_index.vector_stores.ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection\n )\n', (6084, 6126), False, 'import llama_index\n'), ((6853, 6879), 'chromadb.EphemeralClient', 'chromadb.EphemeralClient', ([], {}), '()\n', (6877, 6879), False, 'import chromadb\n'), ((3891, 3937), 'utils.storage_reset', 'utils.storage_reset', ([], {'storage_path': 'args.storage'}), '(storage_path=args.storage)\n', (3910, 3937), False, 'import utils\n'), ((4696, 4762), 'llama_index.StorageContext.from_defaults', 'llama_index.StorageContext.from_defaults', ([], {'persist_dir': 'args.storage'}), '(persist_dir=args.storage)\n', (4736, 4762), False, 'import llama_index\n'), ((4782, 4872), 'llama_index.load_index_from_storage', 'llama_index.load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=\n service_context)\n', (4817, 4872), False, 'import llama_index\n'), ((5765, 5842), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'settings': 'self.chromadb_settings', 'path': 'args.storage'}), '(settings=self.chromadb_settings, path=args.storage)\n', (5790, 5842), False, 'import chromadb\n'), ((6206, 6303), 'llama_index.VectorStoreIndex.from_vector_store', 'llama_index.VectorStoreIndex.from_vector_store', (['vector_store'], {'service_context': 'service_context'}), '(vector_store,\n service_context=service_context)\n', (6252, 6303), False, 'import llama_index\n'), ((6391, 6487), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'llama_index.storage.storage_context.StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store\n =vector_store)\n', (6455, 6487), False, 'import llama_index\n'), ((6602, 6727), 
'llama_index.VectorStoreIndex.from_documents', 'llama_index.VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(documents, storage_context=\n storage_context, service_context=service_context)\n', (6645, 6727), False, 'import llama_index\n'), ((1871, 1947), 'config.APIConfig.get_openai_api_base', 'config.APIConfig.get_openai_api_base', ([], {'host': 'args.api_host', 'port': 'args.api_port'}), '(host=args.api_host, port=args.api_port)\n', (1907, 1947), False, 'import config\n'), ((5097, 5132), 'utils.create_temporary_empty_file', 'utils.create_temporary_empty_file', ([], {}), '()\n', (5130, 5132), False, 'import utils\n'), ((5256, 5348), 'llama_index.VectorStoreIndex.from_documents', 'llama_index.VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=\n service_context)\n', (5299, 5348), False, 'import llama_index\n'), ((5360, 5380), 'os.remove', 'os.remove', (['temp_file'], {}), '(temp_file)\n', (5369, 5380), False, 'import os\n'), ((5536, 5628), 'llama_index.VectorStoreIndex.from_documents', 'llama_index.VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=\n service_context)\n', (5579, 5628), False, 'import llama_index\n'), ((2645, 2761), 'llama_index.embeddings.HuggingFaceEmbedding', 'llama_index.embeddings.HuggingFaceEmbedding', ([], {'model_name': "(args.embed_model_provider + '/' + args.embed_model_name)"}), "(model_name=args.\n embed_model_provider + '/' + args.embed_model_name)\n", (2688, 2761), False, 'import llama_index\n'), ((4047, 4124), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'settings': 'self.chromadb_settings', 'path': 'args.storage'}), '(settings=self.chromadb_settings, path=args.storage)\n', (4072, 4124), False, 'import chromadb\n'), ((4901, 4926), 'os.path.exists', 'os.path.exists', (['args.data'], {}), '(args.data)\n', (4915, 4926), False, 'import os\n'), ((4934, 4955), 'os.listdir', 'os.listdir', (['args.data'], {}), '(args.data)\n', (4944, 4955), False, 'import os\n'), ((6525, 6569), 'llama_index.SimpleDirectoryReader', 'llama_index.SimpleDirectoryReader', (['args.data'], {}), '(args.data)\n', (6558, 6569), False, 'import llama_index\n'), ((4508, 4544), 'os.path.join', 'os.path.join', (['args.storage', 'filename'], {}), '(args.storage, filename)\n', (4520, 4544), False, 'import os\n'), ((5161, 5219), 'llama_index.SimpleDirectoryReader', 'llama_index.SimpleDirectoryReader', ([], {'input_files': '[temp_file]'}), '(input_files=[temp_file])\n', (5194, 5219), False, 'import llama_index\n'), ((5456, 5500), 'llama_index.SimpleDirectoryReader', 'llama_index.SimpleDirectoryReader', (['args.data'], {}), '(args.data)\n', (5489, 5500), False, 'import llama_index\n')] |
from dataclasses import dataclass, field
from typing import Generator, List, Set, Dict, Optional, Tuple, Union, Any
from os.path import sep as PathSep
from transformers import AutoTokenizer
import llama_index
from llama_index import (
PromptTemplate,
Document,
Prompt,
ServiceContext,
set_global_service_context,
set_global_tokenizer
)
from llama_index.response_synthesizers import TreeSummarize
from llama_index.retrievers import BM25Retriever
from llama_index.schema import TextNode, NodeWithScore
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index.llms import LlamaCPP
from llama_index.llms.llama_utils import (
completion_to_prompt,
)
from llama_index.tools.query_engine import QueryEngineTool
from llama_index.agent import ReActAgent
from chatbot import ingest
from chatbot.common import *
# TODO Do prompt engineering to fix the instruction and other stuff
###########
# Prompts #
###########
chatbot_instruction = "Solve the problems given below to the best of your ability. Remember, for each wrong answer, you will be penalized - hence answer carefully and leave the answer blank or caveat when you are not sure of your solution. \nQuestion: {query_str}"
chatbot_prompt = Prompt(chatbot_instruction)
def messages_to_prompt(messages):
prompt = ""
for message in messages:
if message.role == 'system':
prompt += f"<|system|>\n{message.content}</s>\n"
elif message.role == 'user':
prompt += f"<|user|>\n{message.content}</s>\n"
elif message.role == 'assistant':
prompt += f"<|assistant|>\n{message.content}</s>\n"
# ensure we start with a system prompt, insert blank if needed
if not prompt.startswith("<|system|>\n"):
prompt = "<|system|>\n</s>\n" + prompt
# add final assistant prompt
prompt = prompt + "<|assistant|>\n"
return prompt
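# Illustrative example (assumed messages, shown only to document the format above):
#   [system: "Answer briefly.", user: "Is water wet?"] is rendered as
#   "<|system|>\nAnswer briefly.</s>\n<|user|>\nIs water wet?</s>\n<|assistant|>\n"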
# Loading the model
def load_llm(model_path=MODEL_PATH, colab=False):
    # Set the global tokenizer to match the local Zephyr model (used for token counting)
set_global_tokenizer(
AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta").encode
)
llm = LlamaCPP(
model_path=model_path,
context_window=5120,
max_new_tokens=1536,
temperature=0.5,
model_kwargs={"n_gpu_layers": 24 if not colab else 64},
messages_to_prompt=messages_to_prompt,
verbose=True,
)
return llm
# LLM task helpers
def build_input_prompt(message, system_prompt):
"""
Constructs the input prompt string from the chatbot interactions and the current message.
"""
input_prompt = "<|system|>\n" + system_prompt + "</s>\n<|user|>\n"
input_prompt = input_prompt + str(message) + "</s>\n<|assistant|>"
return input_prompt
def get_subject_from_query(agent, query, subjects=subjects):
fmted_subjects = ", ".join(list(subjects.keys()))
generate_responses = lambda x: str(agent.chat(x))
subject = generate_responses(
f"Of the given subjects {fmted_subjects}, which subject does the question '{query}' pertain to? Answer iOf the given subjects {fmted_subjects}, which subject does the question '{query}' pertain to? Answer in a single word containing the name of the subject.n a single word containing the name of the subject."
)
if subject not in subjects:
subject = generate_responses(
(
f"Given the query '{query}', you classified it as a {subject} question. However, that is an incorrect answer. "
f"So, keeping that in mind, classify it into one of the following categories: {fmted_subjects}. Answer in a single word containing the name of the subject."
)
)
return subject
# Search (vector, bm25, ensemble)
def search_doc_metadata(docs: List[Document], query: str, metadata_key: str, top_k=10, keep_duplicates=False):
meta_nodes = list(map(lambda x: TextNode(text=x.metadata[metadata_key]), docs))
if not keep_duplicates:
meta_nodes = list(set(meta_nodes))
    retr = BM25Retriever.from_defaults(nodes=meta_nodes, similarity_top_k=top_k)
answers = retr.retrieve(query)
return list(set(map(lambda x: x.get_content(metadata_mode="all"), answers)))
# Tools and Agent defn.s and helpers
def subject_vector_tool(query_engine, subject):
vector_tool = QueryEngineTool.from_defaults(
query_engine=query_engine,
description=f"Useful for retrieving specific context for anything related to the {subject}",
)
return vector_tool
def response_agent(llm, tools, debug=False):
agent = ReActAgent.from_tools(tools=tools, llm=llm, verbose=debug)
return agent
# Personalized helper functions
def create_tools(indexes):
tools = []
for subject in indexes:
tools.append(subject_vector_tool(indexes[subject], subject))
return tools
def create_chat_agent(llm=None, tools=None, from_dict=False):
    # Load the model lazily only when no llm is supplied.
    llm = llm if llm is not None else load_llm(MODEL_PATH)
    tools = list(tools.values()) if from_dict else (tools or [])
    return response_agent(llm=llm, tools=tools)
def chat_with_agent(agent: ReActAgent, query):
chat_response = agent.chat(chatbot_prompt.format(query_str=query))
return str(chat_response)
def summarize_text(text, paras=["<no context present>"]):
custom_prompt_tmpl = (
"<|system|>\n"
"Summarize the provided book or paragraph, emphasizing key concepts and minimizing unnecessary details. Be concise and provide the essence of the content in the least space possible in points.</s>\n"
"<|user|>\n"
"Do not summarize the following context, instead use them to decide what topics are important and which ones are unnecessary: "
"{context_str}"
"Summarize the following paragraphs only, concisely: "
"{query_str} </s>"
"<|assistant|>"
)
custom_prompt = PromptTemplate(custom_prompt_tmpl)
summarizer = TreeSummarize(verbose=True, summary_template=custom_prompt)
    response = summarizer.get_response(f"{text}", paras)  # text is the query to summarize; paras provide supporting context
return (str(response))
llm = load_llm(model_path=MODEL_PATH)
embeddings = HuggingFaceEmbedding(model_name=EMBEDDING_MODEL)
g_service_ctx = ServiceContext.from_defaults(
llm=llm, embed_model=embeddings,
)
everything_pipeline = ingest.AugmentedIngestPipeline(data_dir_path=DATA_PATH, service_context=g_service_ctx)
everything_pipeline.run_pipeline()
# pipeline fn.s
def search_for_title(title: str, top_k: int) -> List[str]:
results = everything_pipeline.search_one_giant_index(title, top_k=top_k, metadata_key="title")
return results
def search_for_paras(para: str, top_k: int):
answers = everything_pipeline.search_one_giant_index(para, top_k=top_k, metadata_key="window")
return answers
def augmented_summarize(text: str, top_k:int = 2):
paras = search_for_paras(text, top_k)
summary = summarize_text(text, paras)
return summary
##
## externally accessible fn.s and variables
##
tools = create_tools(everything_pipeline.vector_indexes)
agent = create_chat_agent(llm=llm, tools=tools)
if __name__ == "__main__":
set_global_service_context(g_service_ctx)
print(augmented_summarize("Who is rogers?"))
# <|user|>How do I fix my friend's crippling anxiety and depression?\nYou know that {context_str}</s><|assistant|> | [
"llama_index.retrievers.BM25Retriever.from_defaults",
"llama_index.response_synthesizers.TreeSummarize",
"llama_index.tools.query_engine.QueryEngineTool.from_defaults",
"llama_index.agent.ReActAgent.from_tools",
"llama_index.schema.TextNode",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.LlamaCPP",
"llama_index.embeddings.HuggingFaceEmbedding",
"llama_index.Prompt",
"llama_index.PromptTemplate",
"llama_index.set_global_service_context"
] | [((1241, 1268), 'llama_index.Prompt', 'Prompt', (['chatbot_instruction'], {}), '(chatbot_instruction)\n', (1247, 1268), False, 'from llama_index import PromptTemplate, Document, Prompt, ServiceContext, set_global_service_context, set_global_tokenizer\n'), ((6101, 6149), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'EMBEDDING_MODEL'}), '(model_name=EMBEDDING_MODEL)\n', (6121, 6149), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((6167, 6228), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embeddings'}), '(llm=llm, embed_model=embeddings)\n', (6195, 6228), False, 'from llama_index import PromptTemplate, Document, Prompt, ServiceContext, set_global_service_context, set_global_tokenizer\n'), ((6259, 6350), 'chatbot.ingest.AugmentedIngestPipeline', 'ingest.AugmentedIngestPipeline', ([], {'data_dir_path': 'DATA_PATH', 'service_context': 'g_service_ctx'}), '(data_dir_path=DATA_PATH, service_context=\n g_service_ctx)\n', (6289, 6350), False, 'from chatbot import ingest\n'), ((2154, 2361), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_path': 'model_path', 'context_window': '(5120)', 'max_new_tokens': '(1536)', 'temperature': '(0.5)', 'model_kwargs': "{'n_gpu_layers': 24 if not colab else 64}", 'messages_to_prompt': 'messages_to_prompt', 'verbose': '(True)'}), "(model_path=model_path, context_window=5120, max_new_tokens=1536,\n temperature=0.5, model_kwargs={'n_gpu_layers': 24 if not colab else 64},\n messages_to_prompt=messages_to_prompt, verbose=True)\n", (2162, 2361), False, 'from llama_index.llms import LlamaCPP\n'), ((4050, 4119), 'llama_index.retrievers.BM25Retriever.from_defaults', 'BM25Retriever.from_defaults', ([], {'nodes': 'meta_nodes', 'similarity_top_k': 'top_k'}), '(nodes=meta_nodes, similarity_top_k=top_k)\n', (4077, 4119), False, 'from llama_index.retrievers import BM25Retriever\n'), ((4340, 4499), 'llama_index.tools.query_engine.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'query_engine', 'description': 'f"""Useful for retrieving specific context for anything related to the {subject}"""'}), "(query_engine=query_engine, description=\n f'Useful for retrieving specific context for anything related to the {subject}'\n )\n", (4369, 4499), False, 'from llama_index.tools.query_engine import QueryEngineTool\n'), ((4595, 4653), 'llama_index.agent.ReActAgent.from_tools', 'ReActAgent.from_tools', ([], {'tools': 'tools', 'llm': 'llm', 'verbose': 'debug'}), '(tools=tools, llm=llm, verbose=debug)\n', (4616, 4653), False, 'from llama_index.agent import ReActAgent\n'), ((5834, 5868), 'llama_index.PromptTemplate', 'PromptTemplate', (['custom_prompt_tmpl'], {}), '(custom_prompt_tmpl)\n', (5848, 5868), False, 'from llama_index import PromptTemplate, Document, Prompt, ServiceContext, set_global_service_context, set_global_tokenizer\n'), ((5890, 5949), 'llama_index.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'verbose': '(True)', 'summary_template': 'custom_prompt'}), '(verbose=True, summary_template=custom_prompt)\n', (5903, 5949), False, 'from llama_index.response_synthesizers import TreeSummarize\n'), ((7087, 7128), 'llama_index.set_global_service_context', 'set_global_service_context', (['g_service_ctx'], {}), '(g_service_ctx)\n', (7113, 7128), False, 'from llama_index import PromptTemplate, Document, Prompt, ServiceContext, set_global_service_context, set_global_tokenizer\n'), ((2068, 2129), 
'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""HuggingFaceH4/zephyr-7b-beta"""'], {}), "('HuggingFaceH4/zephyr-7b-beta')\n", (2097, 2129), False, 'from transformers import AutoTokenizer\n'), ((3920, 3959), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'x.metadata[metadata_key]'}), '(text=x.metadata[metadata_key])\n', (3928, 3959), False, 'from llama_index.schema import TextNode, NodeWithScore\n')] |
#%%
import llama_index
from llama_index.tools import BaseTool, FunctionTool
from llama_index.agent import OpenAIAgent
from llama_index.llms import OpenAI
from llama_index.vector_stores import ChromaVectorStore
from llama_index import StorageContext, VectorStoreIndex
import chromadb
import phoenix as px
#%%
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b * -1
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return (a + b ) * -1
add_tool = FunctionTool.from_defaults(fn=add)
from IPython import get_ipython
def execute_code(code: str):
"""Executes the given python code in ipython"""
ipython = get_ipython()
ipython.run_code(code)
execute_code_tool = FunctionTool.from_defaults(fn=execute_code)
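# Note: execute_code runs model-generated Python directly in the current IPython
# kernel with no sandboxing, so the agent effectively has full local access.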
import os
import shutil
import subprocess
from typing import Any, Optional
def create_directory(directory_name: str) -> None:
"""
Create a new directory.
"""
os.makedirs(directory_name, exist_ok=True)
def write_file(file_path: str, content: str) -> None:
"""
Write content to a file.
"""
with open(file_path, 'w') as f:
f.write(content)
def read_file(file_path: str) -> str:
"""
Read content from a file.
"""
with open(file_path, 'r') as f:
return f.read()
def initialize_git(directory_name: str) -> None:
"""
Initialize a new git repository.
"""
subprocess.run(["git", "init"], cwd=directory_name)
def git_add_all(directory_name: str) -> None:
"""
Add all changes to git.
"""
subprocess.run(["git", "add", "."], cwd=directory_name)
def git_commit(directory_name: str, message: str) -> None:
"""
Commit changes to git.
"""
subprocess.run(["git", "commit", "-m", message], cwd=directory_name)
def git_push(directory_name: str, remote: str, branch: str) -> None:
"""
Push changes to remote repository.
"""
subprocess.run(["git", "push", remote, branch, "--force"], cwd=directory_name)
def natural_lang_query_github_repo(repo_natural_question_query: str) -> str:
"""
Ask questions about github repo in natural language about different files.
Use this function as a way to read the entire repo and ask specific questions to understand latest state of the repo and the files.
As you write new files to git, you can use this function to map out what's the latest state.
"""
import os
from llama_index import download_loader
from llama_hub.github_repo import GithubRepositoryReader, GithubClient
download_loader("GithubRepositoryReader")
github_client = GithubClient(os.getenv("GITHUB_TOKEN"))
loader = GithubRepositoryReader(
github_client,
owner = "ashtianicode",
repo = "llm-learning-notebook",
verbose = True,
concurrent_requests = 10,
)
docs = loader.load_data(branch="main")
for doc in docs:
print(doc.extra_info)
from llama_index import download_loader, GPTVectorStoreIndex
index = GPTVectorStoreIndex.from_documents(docs)
    query_engine = index.as_query_engine(similarity_top_k=5)
response = query_engine.query(repo_natural_question_query)
return response
def natural_lang_query_website_reader(url: str, question:str) -> str:
from llama_index import VectorStoreIndex, SimpleWebPageReader
documents = SimpleWebPageReader(html_to_text=True).load_data(
[url]
)
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query(question)
return response
# def read_from_vectordb(collection_name: str, prompt: str):
# """
# Read from vectordb.
# """
# px.launch_app()
# llama_index.set_global_handler("arize_phoenix")
# chroma_client = chromadb.PersistentClient()
# chroma_collection = chroma_client.get_collection(collection_name)
# vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
# storage_context = StorageContext.from_defaults(vector_store=vector_store)
# index = VectorStoreIndex(storage_context=storage_context)
# nodes = index.retrieve(prompt, similarity_top_k=3)
# return nodes
# def write_to_vectordb(collection_name: str, text: str):
# """
# Write to vectordb.
# """
# px.launch_app()
# llama_index.set_global_handler("arize_phoenix")
# chroma_client = chromadb.PersistentClient()
# chroma_collection = chroma_client.get_or_create_collection(collection_name)
# vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
# storage_context = StorageContext.from_defaults(vector_store=vector_store)
# index = VectorStoreIndex.from_documents([text], storage_context=storage_context, show_progress=True)
# index.storage_context.persist()
create_directory_tool = FunctionTool.from_defaults(fn=create_directory)
write_file_tool = FunctionTool.from_defaults(fn=write_file)
read_file_tool = FunctionTool.from_defaults(fn=read_file)
initialize_git_tool = FunctionTool.from_defaults(fn=initialize_git)
git_add_all_tool = FunctionTool.from_defaults(fn=git_add_all)
git_commit_tool = FunctionTool.from_defaults(fn=git_commit)
git_push_tool = FunctionTool.from_defaults(fn=git_push)
natural_lang_query_github_repo_tool = FunctionTool.from_defaults(fn=natural_lang_query_github_repo)
natural_lang_query_website_reader_tool = FunctionTool.from_defaults(fn=natural_lang_query_website_reader)
# read_from_vectordb_tool = FunctionTool.from_defaults(fn=read_from_vectordb)
# write_to_vectordb_tool = FunctionTool.from_defaults(fn=write_to_vectordb)
#%%
llm = OpenAI(model="gpt-3.5-turbo")
agent = OpenAIAgent.from_tools([
multiply_tool,
add_tool,
execute_code_tool,
write_file_tool,
read_file_tool,
git_add_all_tool,
git_commit_tool,
git_push_tool,
natural_lang_query_github_repo_tool,
natural_lang_query_website_reader_tool
], llm=llm, verbose=True)
agent.chat("""
You are studying pandas API.
You must take study notes on github using the git tools available to you.
Start making a curriculum based on https://pandas.pydata.org/docs/user_guide/10min.html using the webtool to extract all topics of practice.
Then create a separate .py file for each example snippet that you run for practice.
Use the execute_code_tool tool for running your code.
Get the results of your running code and add the result in comment form to the end of your practice file.
After each practice, push that file to git.
Do your practice one step at a time.
""")
# %%
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.tools.FunctionTool.from_defaults",
"llama_index.SimpleWebPageReader",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.llms.OpenAI",
"llama_index.download_loader",
"llama_index.agent.OpenAIAgent.from_tools"
] | [((452, 491), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'multiply'}), '(fn=multiply)\n', (478, 491), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((620, 654), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'add'}), '(fn=add)\n', (646, 654), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((845, 888), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'execute_code'}), '(fn=execute_code)\n', (871, 888), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((5004, 5051), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'create_directory'}), '(fn=create_directory)\n', (5030, 5051), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((5070, 5111), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'write_file'}), '(fn=write_file)\n', (5096, 5111), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((5129, 5169), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'read_file'}), '(fn=read_file)\n', (5155, 5169), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((5192, 5237), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'initialize_git'}), '(fn=initialize_git)\n', (5218, 5237), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((5257, 5299), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'git_add_all'}), '(fn=git_add_all)\n', (5283, 5299), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((5318, 5359), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'git_commit'}), '(fn=git_commit)\n', (5344, 5359), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((5376, 5415), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'git_push'}), '(fn=git_push)\n', (5402, 5415), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((5454, 5515), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'natural_lang_query_github_repo'}), '(fn=natural_lang_query_github_repo)\n', (5480, 5515), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((5557, 5621), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'natural_lang_query_website_reader'}), '(fn=natural_lang_query_website_reader)\n', (5583, 5621), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((5789, 5818), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (5795, 5818), False, 'from llama_index.llms import OpenAI\n'), ((5828, 6091), 'llama_index.agent.OpenAIAgent.from_tools', 'OpenAIAgent.from_tools', (['[multiply_tool, add_tool, execute_code_tool, write_file_tool,\n read_file_tool, git_add_all_tool, git_commit_tool, git_push_tool,\n natural_lang_query_github_repo_tool, natural_lang_query_website_reader_tool\n ]'], {'llm': 'llm', 'verbose': '(True)'}), '([multiply_tool, add_tool, execute_code_tool,\n write_file_tool, read_file_tool, git_add_all_tool, git_commit_tool,\n git_push_tool, natural_lang_query_github_repo_tool,\n natural_lang_query_website_reader_tool], llm=llm, verbose=True)\n', (5850, 6091), False, 'from 
llama_index.agent import OpenAIAgent\n'), ((784, 797), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (795, 797), False, 'from IPython import get_ipython\n'), ((1067, 1109), 'os.makedirs', 'os.makedirs', (['directory_name'], {'exist_ok': '(True)'}), '(directory_name, exist_ok=True)\n', (1078, 1109), False, 'import os\n'), ((1523, 1574), 'subprocess.run', 'subprocess.run', (["['git', 'init']"], {'cwd': 'directory_name'}), "(['git', 'init'], cwd=directory_name)\n", (1537, 1574), False, 'import subprocess\n'), ((1670, 1725), 'subprocess.run', 'subprocess.run', (["['git', 'add', '.']"], {'cwd': 'directory_name'}), "(['git', 'add', '.'], cwd=directory_name)\n", (1684, 1725), False, 'import subprocess\n'), ((1833, 1901), 'subprocess.run', 'subprocess.run', (["['git', 'commit', '-m', message]"], {'cwd': 'directory_name'}), "(['git', 'commit', '-m', message], cwd=directory_name)\n", (1847, 1901), False, 'import subprocess\n'), ((2031, 2109), 'subprocess.run', 'subprocess.run', (["['git', 'push', remote, branch, '--force']"], {'cwd': 'directory_name'}), "(['git', 'push', remote, branch, '--force'], cwd=directory_name)\n", (2045, 2109), False, 'import subprocess\n'), ((2658, 2699), 'llama_index.download_loader', 'download_loader', (['"""GithubRepositoryReader"""'], {}), "('GithubRepositoryReader')\n", (2673, 2699), False, 'from llama_index import download_loader, GPTVectorStoreIndex\n'), ((2774, 2906), 'llama_hub.github_repo.GithubRepositoryReader', 'GithubRepositoryReader', (['github_client'], {'owner': '"""ashtianicode"""', 'repo': '"""llm-learning-notebook"""', 'verbose': '(True)', 'concurrent_requests': '(10)'}), "(github_client, owner='ashtianicode', repo=\n 'llm-learning-notebook', verbose=True, concurrent_requests=10)\n", (2796, 2906), False, 'from llama_hub.github_repo import GithubRepositoryReader, GithubClient\n'), ((3184, 3224), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['docs'], {}), '(docs)\n', (3218, 3224), False, 'from llama_index import download_loader, GPTVectorStoreIndex\n'), ((3597, 3639), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (3628, 3639), False, 'from llama_index import VectorStoreIndex, SimpleWebPageReader\n'), ((2734, 2759), 'os.getenv', 'os.getenv', (['"""GITHUB_TOKEN"""'], {}), "('GITHUB_TOKEN')\n", (2743, 2759), False, 'import os\n'), ((3515, 3553), 'llama_index.SimpleWebPageReader', 'SimpleWebPageReader', ([], {'html_to_text': '(True)'}), '(html_to_text=True)\n', (3534, 3553), False, 'from llama_index import VectorStoreIndex, SimpleWebPageReader\n')] |
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import asyncio
from models.memory_models import (Message)
from services.config import get_option
from services.logger import setup_logger
from utilities.queue_utils import setup_queue, process_queue
from elasticsearch import AsyncElasticsearch
import llama_index
from llama_index import (ServiceContext, SimpleDirectoryReader, VectorStoreIndex, Document)
from llama_index.llms import OpenAI
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index.vector_stores import ElasticsearchStore
from llama_index.storage.storage_context import StorageContext
from memorybank.services import llamaindex_index_factory as esutils
# Get the Azure storage connection string and the save message queue from environment variables
AZURE_STORAGE_CONNECTION_STRING = get_option('AZURE_STORAGE_CONNECTION_STRING', is_required=True)
SAVE_MESSAGE_QUEUE = get_option('SAVE_MESSAGE_QUEUE', is_required=True)
DELETE_QUEUE = False
REMOVE_MESSAGES = True
# Set up logging
logger = setup_logger(__name__)
# Functionality
async def main():
index = await esutils.get_index()
def index_message(resource_dict):
message = Message(**resource_dict)
        # Wrap the message text in a Document and insert it into the index
doc = Document(
text=message.text,
metadata={"collection": message.collection, "author": "user" },
)
index.insert(doc)
print(f"Saving message '{message.text}' in collection {message.collection}")
queue_service = setup_queue(AZURE_STORAGE_CONNECTION_STRING, SAVE_MESSAGE_QUEUE, DELETE_QUEUE)
process_queue(queue_service, SAVE_MESSAGE_QUEUE, index_message, logger, REMOVE_MESSAGES)
if __name__ == "__main__":
asyncio.run(main()) | [
"llama_index.Document"
] | [((877, 940), 'services.config.get_option', 'get_option', (['"""AZURE_STORAGE_CONNECTION_STRING"""'], {'is_required': '(True)'}), "('AZURE_STORAGE_CONNECTION_STRING', is_required=True)\n", (887, 940), False, 'from services.config import get_option\n'), ((962, 1012), 'services.config.get_option', 'get_option', (['"""SAVE_MESSAGE_QUEUE"""'], {'is_required': '(True)'}), "('SAVE_MESSAGE_QUEUE', is_required=True)\n", (972, 1012), False, 'from services.config import get_option\n'), ((1085, 1107), 'services.logger.setup_logger', 'setup_logger', (['__name__'], {}), '(__name__)\n', (1097, 1107), False, 'from services.logger import setup_logger\n'), ((1609, 1687), 'utilities.queue_utils.setup_queue', 'setup_queue', (['AZURE_STORAGE_CONNECTION_STRING', 'SAVE_MESSAGE_QUEUE', 'DELETE_QUEUE'], {}), '(AZURE_STORAGE_CONNECTION_STRING, SAVE_MESSAGE_QUEUE, DELETE_QUEUE)\n', (1620, 1687), False, 'from utilities.queue_utils import setup_queue, process_queue\n'), ((1692, 1784), 'utilities.queue_utils.process_queue', 'process_queue', (['queue_service', 'SAVE_MESSAGE_QUEUE', 'index_message', 'logger', 'REMOVE_MESSAGES'], {}), '(queue_service, SAVE_MESSAGE_QUEUE, index_message, logger,\n REMOVE_MESSAGES)\n', (1705, 1784), False, 'from utilities.queue_utils import setup_queue, process_queue\n'), ((1162, 1181), 'memorybank.services.llamaindex_index_factory.get_index', 'esutils.get_index', ([], {}), '()\n', (1179, 1181), True, 'from memorybank.services import llamaindex_index_factory as esutils\n'), ((1243, 1267), 'models.memory_models.Message', 'Message', ([], {}), '(**resource_dict)\n', (1250, 1267), False, 'from models.memory_models import Message\n'), ((1324, 1418), 'llama_index.Document', 'Document', ([], {'text': 'message.text', 'metadata': "{'collection': message.collection, 'author': 'user'}"}), "(text=message.text, metadata={'collection': message.collection,\n 'author': 'user'})\n", (1332, 1418), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex, Document\n'), ((70, 95), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (85, 95), False, 'import os\n')] |
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from launcher import create_app
from di import global_injector
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((192, 232), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (222, 232), False, 'import llama_index\n'), ((240, 267), 'launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (250, 267), False, 'from launcher import create_app\n')] |
import os
import streamlit as st
import openai
from core.pipeline_builder import build_query_pipeline
from core.index_builder.inquiry_index_builder import load_inquiry_index
from core.index_builder.act_index_builder import (
load_act_index,
load_act_enforcement_index,
)
from core.utils import draw_dag
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
import llama_index.core
os.environ["OPENAI_API_KEY"] = st.secrets.openai_key
import phoenix as px
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
embed_model = OpenAIEmbedding(
model="text-embedding-3-small",
)
Settings.embed_model = embed_model
st.set_page_config(
page_title="ICT Construction Chatbot",
page_icon="👷",
layout="centered",
initial_sidebar_state="auto",
menu_items=None,
)
openai.api_key = st.secrets.openai_key
st.title("ICT 건설 컨설턴트, powered by Wordbricks 👷💬")
if "messages" not in st.session_state.keys(): # Initialize the chat messages history
st.session_state.messages = [
{
"role": "assistant",
"content": "궁금한 사항을 물어보세요. ICT 건설에 대한 전문가봇이 답변해드립니다.",
}
]
@st.cache_resource(show_spinner=False)
def load_indexes():
with st.spinner(text="데이터를 로딩중 입니다. 잠시만 기다려주세요."):
inquiry_index = load_inquiry_index()
act_index = load_act_index()
act_enforcement_index = load_act_enforcement_index()
return {
"inquiry": inquiry_index,
"act": act_index,
"act_enforcement": act_enforcement_index,
}
indexes = load_indexes()
qp = build_query_pipeline(indexes)
draw_dag(qp)
if "query_pipeline" not in st.session_state.keys(): # Initialize the chat engine
st.session_state.query_pipeline = qp
if prompt := st.chat_input("질문"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("생각중..."):
response = st.session_state.query_pipeline.run(query_str=prompt)
st.write(str(response))
message = {"role": "assistant", "content": str(response)}
st.session_state.messages.append(message) # Add response to message history
| [
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((510, 525), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (523, 525), True, 'import phoenix as px\n'), ((619, 666), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-3-small"""'}), "(model='text-embedding-3-small')\n", (634, 666), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((710, 852), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""ICT Construction Chatbot"""', 'page_icon': '"""👷"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title='ICT Construction Chatbot', page_icon='👷',\n layout='centered', initial_sidebar_state='auto', menu_items=None)\n", (728, 852), True, 'import streamlit as st\n'), ((911, 960), 'streamlit.title', 'st.title', (['"""ICT 건설 컨설턴트, powered by Wordbricks 👷💬"""'], {}), "('ICT 건설 컨설턴트, powered by Wordbricks 👷💬')\n", (919, 960), True, 'import streamlit as st\n'), ((1211, 1248), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (1228, 1248), True, 'import streamlit as st\n'), ((1648, 1677), 'core.pipeline_builder.build_query_pipeline', 'build_query_pipeline', (['indexes'], {}), '(indexes)\n', (1668, 1677), False, 'from core.pipeline_builder import build_query_pipeline\n'), ((1678, 1690), 'core.utils.draw_dag', 'draw_dag', (['qp'], {}), '(qp)\n', (1686, 1690), False, 'from core.utils import draw_dag\n'), ((983, 1006), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (1004, 1006), True, 'import streamlit as st\n'), ((1719, 1742), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (1740, 1742), True, 'import streamlit as st\n'), ((1829, 1848), 'streamlit.chat_input', 'st.chat_input', (['"""질문"""'], {}), "('질문')\n", (1842, 1848), True, 'import streamlit as st\n'), ((1904, 1973), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (1936, 1973), True, 'import streamlit as st\n'), ((1278, 1322), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""데이터를 로딩중 입니다. 잠시만 기다려주세요."""'}), "(text='데이터를 로딩중 입니다. 
잠시만 기다려주세요.')\n", (1288, 1322), True, 'import streamlit as st\n'), ((1348, 1368), 'core.index_builder.inquiry_index_builder.load_inquiry_index', 'load_inquiry_index', ([], {}), '()\n', (1366, 1368), False, 'from core.index_builder.inquiry_index_builder import load_inquiry_index\n'), ((1389, 1405), 'core.index_builder.act_index_builder.load_act_index', 'load_act_index', ([], {}), '()\n', (1403, 1405), False, 'from core.index_builder.act_index_builder import load_act_index, load_act_enforcement_index\n'), ((1438, 1466), 'core.index_builder.act_index_builder.load_act_enforcement_index', 'load_act_enforcement_index', ([], {}), '()\n', (1464, 1466), False, 'from core.index_builder.act_index_builder import load_act_index, load_act_enforcement_index\n'), ((2061, 2093), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (2076, 2093), True, 'import streamlit as st\n'), ((2103, 2131), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (2111, 2131), True, 'import streamlit as st\n'), ((2264, 2292), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2279, 2292), True, 'import streamlit as st\n'), ((2307, 2327), 'streamlit.spinner', 'st.spinner', (['"""생각중..."""'], {}), "('생각중...')\n", (2317, 2327), True, 'import streamlit as st\n'), ((2352, 2405), 'streamlit.session_state.query_pipeline.run', 'st.session_state.query_pipeline.run', ([], {'query_str': 'prompt'}), '(query_str=prompt)\n', (2387, 2405), True, 'import streamlit as st\n'), ((2524, 2565), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (2556, 2565), True, 'import streamlit as st\n')] |
"""Base vector store index query."""
from typing import Any, Dict, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.constants import DEFAULT_SIMILARITY_TOP_K
from llama_index.core.data_structs.data_structs import IndexDict
from llama_index.core.indices.utils import log_vector_store_query_result
from llama_index.core.indices.vector_store.base import VectorStoreIndex
from llama_index.core.schema import NodeWithScore, ObjectType, QueryBundle
from llama_index.core.vector_stores.types import (
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
class VectorIndexRetriever(BaseRetriever):
"""Vector index retriever.
Args:
index (VectorStoreIndex): vector store index.
similarity_top_k (int): number of top k results to return.
vector_store_query_mode (str): vector store query mode
See reference for VectorStoreQueryMode for full list of supported modes.
filters (Optional[MetadataFilters]): metadata filters, defaults to None
alpha (float): weight for sparse/dense retrieval, only used for
hybrid query mode.
doc_ids (Optional[List[str]]): list of documents to constrain search.
vector_store_kwargs (dict): Additional vector store specific kwargs to pass
through to the vector store at query time.
"""
def __init__(
self,
index: VectorStoreIndex,
similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
vector_store_query_mode: VectorStoreQueryMode = VectorStoreQueryMode.DEFAULT,
filters: Optional[MetadataFilters] = None,
alpha: Optional[float] = None,
node_ids: Optional[List[str]] = None,
doc_ids: Optional[List[str]] = None,
sparse_top_k: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
object_map: Optional[dict] = None,
embed_model: Optional[BaseEmbedding] = None,
verbose: bool = False,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._index = index
self._vector_store = self._index.vector_store
self._embed_model = embed_model or self._index._embed_model
self._docstore = self._index.docstore
self._similarity_top_k = similarity_top_k
self._vector_store_query_mode = VectorStoreQueryMode(vector_store_query_mode)
self._alpha = alpha
self._node_ids = node_ids
self._doc_ids = doc_ids
self._filters = filters
self._sparse_top_k = sparse_top_k
self._kwargs: Dict[str, Any] = kwargs.get("vector_store_kwargs", {})
callback_manager = callback_manager or CallbackManager()
super().__init__(
callback_manager=callback_manager,
object_map=object_map,
verbose=verbose,
)
@property
def similarity_top_k(self) -> int:
"""Return similarity top k."""
return self._similarity_top_k
@similarity_top_k.setter
def similarity_top_k(self, similarity_top_k: int) -> None:
"""Set similarity top k."""
self._similarity_top_k = similarity_top_k
@dispatcher.span
def _retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
if self._vector_store.is_embedding_query:
if query_bundle.embedding is None and len(query_bundle.embedding_strs) > 0:
query_bundle.embedding = (
self._embed_model.get_agg_embedding_from_queries(
query_bundle.embedding_strs
)
)
return self._get_nodes_with_embeddings(query_bundle)
@dispatcher.span
async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
if self._vector_store.is_embedding_query:
if query_bundle.embedding is None and len(query_bundle.embedding_strs) > 0:
embed_model = self._embed_model
query_bundle.embedding = (
await embed_model.aget_agg_embedding_from_queries(
query_bundle.embedding_strs
)
)
return await self._aget_nodes_with_embeddings(query_bundle)
def _build_vector_store_query(
self, query_bundle_with_embeddings: QueryBundle
) -> VectorStoreQuery:
return VectorStoreQuery(
query_embedding=query_bundle_with_embeddings.embedding,
similarity_top_k=self._similarity_top_k,
node_ids=self._node_ids,
doc_ids=self._doc_ids,
query_str=query_bundle_with_embeddings.query_str,
mode=self._vector_store_query_mode,
alpha=self._alpha,
filters=self._filters,
sparse_top_k=self._sparse_top_k,
)
def _build_node_list_from_query_result(
self, query_result: VectorStoreQueryResult
) -> List[NodeWithScore]:
if query_result.nodes is None:
# NOTE: vector store does not keep text and returns node indices.
# Need to recover all nodes from docstore
if query_result.ids is None:
raise ValueError(
"Vector store query result should return at "
"least one of nodes or ids."
)
assert isinstance(self._index.index_struct, IndexDict)
node_ids = [
self._index.index_struct.nodes_dict[idx] for idx in query_result.ids
]
nodes = self._docstore.get_nodes(node_ids)
query_result.nodes = nodes
else:
# NOTE: vector store keeps text, returns nodes.
# Only need to recover image or index nodes from docstore
for i in range(len(query_result.nodes)):
source_node = query_result.nodes[i].source_node
if (not self._vector_store.stores_text) or (
source_node is not None and source_node.node_type != ObjectType.TEXT
):
node_id = query_result.nodes[i].node_id
if self._docstore.document_exists(node_id):
query_result.nodes[i] = self._docstore.get_node(
node_id
) # type: ignore[index]
log_vector_store_query_result(query_result)
node_with_scores: List[NodeWithScore] = []
for ind, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[ind]
node_with_scores.append(NodeWithScore(node=node, score=score))
return node_with_scores
def _get_nodes_with_embeddings(
self, query_bundle_with_embeddings: QueryBundle
) -> List[NodeWithScore]:
query = self._build_vector_store_query(query_bundle_with_embeddings)
query_result = self._vector_store.query(query, **self._kwargs)
return self._build_node_list_from_query_result(query_result)
async def _aget_nodes_with_embeddings(
self, query_bundle_with_embeddings: QueryBundle
) -> List[NodeWithScore]:
query = self._build_vector_store_query(query_bundle_with_embeddings)
query_result = await self._vector_store.aquery(query, **self._kwargs)
return self._build_node_list_from_query_result(query_result)
| [
"llama_index.core.indices.utils.log_vector_store_query_result",
"llama_index.core.callbacks.base.CallbackManager",
"llama_index.core.vector_stores.types.VectorStoreQueryMode",
"llama_index.core.vector_stores.types.VectorStoreQuery",
"llama_index.core.instrumentation.get_dispatcher",
"llama_index.core.schema.NodeWithScore"
] | [((838, 873), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (863, 873), True, 'import llama_index.core.instrumentation as instrument\n'), ((2619, 2664), 'llama_index.core.vector_stores.types.VectorStoreQueryMode', 'VectorStoreQueryMode', (['vector_store_query_mode'], {}), '(vector_store_query_mode)\n', (2639, 2664), False, 'from llama_index.core.vector_stores.types import MetadataFilters, VectorStoreQuery, VectorStoreQueryMode, VectorStoreQueryResult\n'), ((4655, 4994), 'llama_index.core.vector_stores.types.VectorStoreQuery', 'VectorStoreQuery', ([], {'query_embedding': 'query_bundle_with_embeddings.embedding', 'similarity_top_k': 'self._similarity_top_k', 'node_ids': 'self._node_ids', 'doc_ids': 'self._doc_ids', 'query_str': 'query_bundle_with_embeddings.query_str', 'mode': 'self._vector_store_query_mode', 'alpha': 'self._alpha', 'filters': 'self._filters', 'sparse_top_k': 'self._sparse_top_k'}), '(query_embedding=query_bundle_with_embeddings.embedding,\n similarity_top_k=self._similarity_top_k, node_ids=self._node_ids,\n doc_ids=self._doc_ids, query_str=query_bundle_with_embeddings.query_str,\n mode=self._vector_store_query_mode, alpha=self._alpha, filters=self.\n _filters, sparse_top_k=self._sparse_top_k)\n', (4671, 4994), False, 'from llama_index.core.vector_stores.types import MetadataFilters, VectorStoreQuery, VectorStoreQueryMode, VectorStoreQueryResult\n'), ((6608, 6651), 'llama_index.core.indices.utils.log_vector_store_query_result', 'log_vector_store_query_result', (['query_result'], {}), '(query_result)\n', (6637, 6651), False, 'from llama_index.core.indices.utils import log_vector_store_query_result\n'), ((2958, 2975), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (2973, 2975), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((6947, 6984), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'node', 'score': 'score'}), '(node=node, score=score)\n', (6960, 6984), False, 'from llama_index.core.schema import NodeWithScore, ObjectType, QueryBundle\n')] |
"""Google Generative AI Vector Store.
The GenAI Semantic Retriever API is a managed end-to-end service that allows
developers to create a corpus of documents to perform semantic search on
related passages given a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast
from llama_index.bridge.pydantic import BaseModel, Field, PrivateAttr # type: ignore
from llama_index.indices.service_context import ServiceContext
from llama_index.schema import BaseNode, RelatedNodeInfo, TextNode
from llama_index.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryResult,
)
if TYPE_CHECKING:
from google.auth import credentials
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_default_doc_id = "default-doc"
google_service_context = ServiceContext.from_defaults(
# Avoids instantiating OpenAI as the default model.
llm=None,
# Avoids instantiating HuggingFace as the default model.
embed_model=None,
)
"""Google GenerativeAI service context.
Use this to provide the correct service context for `GoogleVectorStore`.
See the docstring for `GoogleVectorStore` for a usage example.
"""
def set_google_config(
*,
api_endpoint: Optional[str] = None,
user_agent: Optional[str] = None,
page_size: Optional[int] = None,
auth_credentials: Optional["credentials.Credentials"] = None,
**kwargs: Any,
) -> None:
"""
Set the configuration for Google Generative AI API.
    Parameters are optional. Normally, the defaults should work fine.
If provided, they will override the default values in the Config class.
See the docstring in `genai_extension.py` for more details.
auth_credentials: Optional["credentials.Credentials"] = None,
        Use this to pass Google Auth credentials, such as when using a service account.
        Refer to the auth credentials documentation:
https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount.
Example:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
"/path/to/service.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
config_attrs = {
"api_endpoint": api_endpoint,
"user_agent": user_agent,
"page_size": page_size,
"auth_credentials": auth_credentials,
"testing": kwargs.get("testing", None),
}
attrs = {k: v for k, v in config_attrs.items() if v is not None}
config = genaix.Config(**attrs)
genaix.set_config(config)
class NoSuchCorpusException(Exception):
def __init__(self, *, corpus_id: str) -> None:
super().__init__(f"No such corpus {corpus_id} found")
class GoogleVectorStore(BasePydanticVectorStore):
"""Google GenerativeAI Vector Store.
Currently, it computes the embedding vectors on the server side.
Example:
google_vector_store = GoogleVectorStore.from_corpus(
corpus_id="my-corpus-id")
index = VectorStoreIndex.from_vector_store(
google_vector_store,
service_context=google_service_context)
Attributes:
corpus_id: The corpus ID that this vector store instance will read and
write to.
"""
# Semantic Retriever stores the document node's text as string and embeds
# the vectors on the server automatically.
stores_text: bool = True
is_embedding_query: bool = False
# This is not the Google's corpus name but an ID generated in the LlamaIndex
# world.
corpus_id: str = Field(frozen=True)
"""Corpus ID that this instance of the vector store is using."""
_client: Any = PrivateAttr()
def __init__(self, *, client: Any, **kwargs: Any):
"""Raw constructor.
Use the class method `from_corpus` or `create_corpus` instead.
Args:
client: The low-level retriever class from google.ai.generativelanguage.
"""
try:
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(**kwargs)
assert isinstance(client, genai.RetrieverServiceClient)
self._client = client
@classmethod
def from_corpus(cls, *, corpus_id: str) -> "GoogleVectorStore":
"""Create an instance that points to an existing corpus.
Args:
corpus_id: ID of an existing corpus on Google's server.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
NoSuchCorpusException if no such corpus is found.
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.from_corpus(corpus_id={corpus_id})")
client = genaix.build_semantic_retriever()
if genaix.get_corpus(corpus_id=corpus_id, client=client) is None:
raise NoSuchCorpusException(corpus_id=corpus_id)
return cls(corpus_id=corpus_id, client=client)
@classmethod
def create_corpus(
cls, *, corpus_id: Optional[str] = None, display_name: Optional[str] = None
) -> "GoogleVectorStore":
"""Create an instance that points to a newly created corpus.
Examples:
store = GoogleVectorStore.create_corpus()
print(f"Created corpus with ID: {store.corpus_id})
store = GoogleVectorStore.create_corpus(
display_name="My first corpus"
)
store = GoogleVectorStore.create_corpus(
corpus_id="my-corpus-1",
display_name="My first corpus"
)
Args:
corpus_id: ID of the new corpus to be created. If not provided,
Google server will provide one for you.
display_name: Title of the corpus. If not provided, Google server
will provide one for you.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
An exception if the corpus already exists or the user hits the
quota limit.
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(
f"\n\nGoogleVectorStore.create_corpus(new_corpus_id={corpus_id}, new_display_name={display_name})"
)
client = genaix.build_semantic_retriever()
new_corpus_id = corpus_id or str(uuid.uuid4())
new_corpus = genaix.create_corpus(
corpus_id=new_corpus_id, display_name=display_name, client=client
)
name = genaix.EntityName.from_str(new_corpus.name)
return cls(corpus_id=name.corpus_id, client=client)
@classmethod
def class_name(cls) -> str:
return "GoogleVectorStore"
@property
def client(self) -> Any:
return self._client
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""Add nodes with embedding to vector store.
If a node has a source node, the source node's ID will be used to create
a document. Otherwise, a default document for that corpus will be used
to house the node.
Furthermore, if the source node has a metadata field "file_name", it
will be used as the title of the document. If the source node has no
such field, Google server will assign a title to the document.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.add([
TextNode(
text="Hello, my darling",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
TextNode(
text="Goodbye, my baby",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
])
The above code will create one document with ID `doc-456` and title
`Title for doc-456`. This document will house both nodes.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.add(nodes={nodes})")
client = cast(genai.RetrieverServiceClient, self.client)
created_node_ids: List[str] = []
for nodeGroup in _group_nodes_by_source(nodes):
source = nodeGroup.source_node
document_id = source.node_id
document = genaix.get_document(
corpus_id=self.corpus_id, document_id=document_id, client=client
)
if not document:
genaix.create_document(
corpus_id=self.corpus_id,
display_name=source.metadata.get("file_name", None),
document_id=document_id,
metadata=source.metadata,
client=client,
)
created_chunks = genaix.batch_create_chunk(
corpus_id=self.corpus_id,
document_id=document_id,
texts=[node.get_content() for node in nodeGroup.nodes],
metadatas=[node.metadata for node in nodeGroup.nodes],
client=client,
)
created_node_ids.extend([chunk.name for chunk in created_chunks])
return created_node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes by ref_doc_id.
Both the underlying nodes and the document will be deleted from Google
server.
Args:
ref_doc_id: The document ID to be deleted.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.delete(ref_doc_id={ref_doc_id})")
client = cast(genai.RetrieverServiceClient, self.client)
genaix.delete_document(
corpus_id=self.corpus_id, document_id=ref_doc_id, client=client
)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.query(
query=VectorStoreQuery(
query_str="What is the meaning of life?",
# Only nodes with this author.
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="author",
value="Arthur Schopenhauer",
)
]
),
# Only from these docs. If not provided,
# the entire corpus is searched.
doc_ids=["doc-456"],
similarity_top_k=3,
)
)
Args:
query: See `llama_index.vector_stores.types.VectorStoreQuery`.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.query(query={query})")
query_str = query.query_str
if query_str is None:
raise ValueError("VectorStoreQuery.query_str should not be None.")
client = cast(genai.RetrieverServiceClient, self.client)
relevant_chunks: List[genai.RelevantChunk] = []
if query.doc_ids is None:
            # The chunks returned by query_corpus should already be sorted in
            # descending order of relevance score.
relevant_chunks = genaix.query_corpus(
corpus_id=self.corpus_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
else:
for doc_id in query.doc_ids:
relevant_chunks.extend(
genaix.query_document(
corpus_id=self.corpus_id,
document_id=doc_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
)
        # Make sure the chunks are sorted in descending order of relevance
        # score, even across multiple documents.
relevant_chunks.sort(key=lambda c: c.chunk_relevance_score, reverse=True)
return VectorStoreQueryResult(
nodes=[
TextNode(
text=chunk.chunk.data.string_value,
id_=_extract_chunk_id(chunk.chunk.name),
)
for chunk in relevant_chunks
],
ids=[_extract_chunk_id(chunk.chunk.name) for chunk in relevant_chunks],
similarities=[chunk.chunk_relevance_score for chunk in relevant_chunks],
)
def _extract_chunk_id(entity_name: str) -> str:
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
    chunk_id = genaix.EntityName.from_str(entity_name).chunk_id
    assert chunk_id is not None
    return chunk_id
class _NodeGroup(BaseModel):
"""Every node in nodes have the same source node."""
source_node: RelatedNodeInfo
nodes: List[BaseNode]
def _group_nodes_by_source(nodes: Sequence[BaseNode]) -> List[_NodeGroup]:
"""Returns a list of lists of nodes where each list has all the nodes
from the same document.
"""
groups: Dict[str, _NodeGroup] = {}
for node in nodes:
source_node: RelatedNodeInfo
if isinstance(node.source_node, RelatedNodeInfo):
source_node = node.source_node
else:
source_node = RelatedNodeInfo(node_id=_default_doc_id)
if source_node.node_id not in groups:
groups[source_node.node_id] = _NodeGroup(source_node=source_node, nodes=[])
groups[source_node.node_id].nodes.append(node)
return list(groups.values())
def _convert_filter(fs: Optional[MetadataFilters]) -> Dict[str, Any]:
if fs is None:
return {}
assert isinstance(fs, MetadataFilters)
return {f.key: f.value for f in fs.filters}
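# Illustrative sketch (not part of the original module): _convert_filter turns
# llama_index MetadataFilters into the plain key -> value dict expected by the
# genai_extension query helpers. ExactMatchFilter is the filter type used in the
# `query` docstring above.
def _example_convert_filter() -> Dict[str, Any]:
    from llama_index.vector_stores.types import ExactMatchFilter
    fs = MetadataFilters(
        filters=[ExactMatchFilter(key="author", value="Arthur Schopenhauer")]
    )
    assert _convert_filter(None) == {}
    return _convert_filter(fs)  # -> {"author": "Arthur Schopenhauer"}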
| [
"llama_index.vector_stores.google.generativeai.genai_extension.EntityName.from_str",
"llama_index.vector_stores.google.generativeai.genai_extension.get_corpus",
"llama_index.vector_stores.google.generativeai.genai_extension.build_semantic_retriever",
"llama_index.bridge.pydantic.Field",
"llama_index.vector_stores.google.generativeai.genai_extension.delete_document",
"llama_index.vector_stores.google.generativeai.genai_extension.create_corpus",
"llama_index.bridge.pydantic.PrivateAttr",
"llama_index.schema.RelatedNodeInfo",
"llama_index.vector_stores.google.generativeai.genai_extension.get_document",
"llama_index.vector_stores.google.generativeai.genai_extension.set_config",
"llama_index.indices.service_context.ServiceContext.from_defaults",
"llama_index.vector_stores.google.generativeai.genai_extension.Config"
] | [((843, 870), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (860, 870), False, 'import logging\n'), ((1036, 1092), 'llama_index.indices.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'None', 'embed_model': 'None'}), '(llm=None, embed_model=None)\n', (1064, 1092), False, 'from llama_index.indices.service_context import ServiceContext\n'), ((3113, 3135), 'llama_index.vector_stores.google.generativeai.genai_extension.Config', 'genaix.Config', ([], {}), '(**attrs)\n', (3126, 3135), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((3140, 3165), 'llama_index.vector_stores.google.generativeai.genai_extension.set_config', 'genaix.set_config', (['config'], {}), '(config)\n', (3157, 3165), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4180, 4198), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4185, 4198), False, 'from llama_index.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((4288, 4301), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (4299, 4301), False, 'from llama_index.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((5550, 5583), 'llama_index.vector_stores.google.generativeai.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (5581, 5583), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7243, 7276), 'llama_index.vector_stores.google.generativeai.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (7274, 7276), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7353, 7444), 'llama_index.vector_stores.google.generativeai.genai_extension.create_corpus', 'genaix.create_corpus', ([], {'corpus_id': 'new_corpus_id', 'display_name': 'display_name', 'client': 'client'}), '(corpus_id=new_corpus_id, display_name=display_name,\n client=client)\n', (7373, 7444), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7478, 7521), 'llama_index.vector_stores.google.generativeai.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['new_corpus.name'], {}), '(new_corpus.name)\n', (7504, 7521), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((9606, 9653), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (9610, 9653), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((11370, 11417), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (11374, 11417), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((11426, 11517), 'llama_index.vector_stores.google.generativeai.genai_extension.delete_document', 'genaix.delete_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'ref_doc_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=ref_doc_id,\n client=client)\n', (11448, 11517), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((13029, 13076), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (13033, 13076), False, 'from 
typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((14910, 14949), 'llama_index.vector_stores.google.generativeai.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['entity_name'], {}), '(entity_name)\n', (14936, 14949), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((5595, 5648), 'llama_index.vector_stores.google.generativeai.genai_extension.get_corpus', 'genaix.get_corpus', ([], {'corpus_id': 'corpus_id', 'client': 'client'}), '(corpus_id=corpus_id, client=client)\n', (5612, 5648), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((9859, 9948), 'llama_index.vector_stores.google.generativeai.genai_extension.get_document', 'genaix.get_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'document_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=document_id,\n client=client)\n', (9878, 9948), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((15574, 15614), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '_default_doc_id'}), '(node_id=_default_doc_id)\n', (15589, 15614), False, 'from llama_index.schema import BaseNode, RelatedNodeInfo, TextNode\n'), ((7318, 7330), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7328, 7330), False, 'import uuid\n')] |
import csv
import time
import logging
import os
import inspect
# import fitz
from datetime import datetime
from functools import wraps
import shutil
from pathlib import Path
from google.oauth2.credentials import Credentials
from google.oauth2.service_account import Credentials as ServiceAccountCredentials
import subprocess
from langchain.embeddings import OpenAIEmbeddings
from llama_index.legacy import OpenAIEmbedding
from llama_index.legacy.embeddings import HuggingFaceEmbedding
from llama_index.legacy.core.llms.types import ChatMessage, MessageRole
import os
import subprocess
from llama_index.vector_stores.pinecone import PineconeVectorStore
from pinecone import Pinecone
def root_directory() -> str:
"""
Determine the root directory of the project. It checks if it's running in a Docker container and adjusts accordingly.
Returns:
- str: The path to the root directory of the project.
"""
# Check if running in a Docker container
if os.path.exists('/.dockerenv'):
# If inside a Docker container, use '/app' as the root directory
return '/app'
# If not in a Docker container, try to use the git command to find the root directory
try:
git_root = subprocess.check_output(['git', 'rev-parse', '--show-toplevel'], stderr=subprocess.STDOUT)
return git_root.strip().decode('utf-8')
except subprocess.CalledProcessError:
# Git command failed, which might mean we're not in a Git repository
# Fall back to manual traversal
pass
except Exception as e:
# Some other error occurred while trying to execute git command
print(f"An error occurred while trying to find the Git repository root: {e}")
# Manual traversal if git command fails
current_dir = os.getcwd()
root = os.path.abspath(os.sep)
traversal_count = 0 # Track the number of levels traversed
while current_dir != root:
try:
if 'src' in os.listdir(current_dir):
print(f"Found root directory: {current_dir}")
return current_dir
current_dir = os.path.dirname(current_dir)
traversal_count += 1
print(f"Traversal count # {traversal_count}")
if traversal_count > 10:
raise Exception("Exceeded maximum traversal depth (more than 10 levels).")
except PermissionError as e:
# Could not access a directory due to permission issues
raise Exception(f"Permission denied when accessing directory: {current_dir}") from e
except FileNotFoundError as e:
# The directory was not found, which should not happen unless the filesystem is changing
raise Exception(f"The directory was not found: {current_dir}") from e
except OSError as e:
# Handle any other OS-related errors
raise Exception("An OS error occurred while searching for the Git repository root.") from e
    # If we've reached this point, we've hit the root of the file system without
    # finding the project root (neither via git nor a directory containing 'src').
    raise Exception("Could not find the root directory of the project. Please make sure you are running this script from within the repository (a Git checkout or a directory tree containing 'src').")
def start_logging(log_prefix):
# Ensure that root_directory() is defined and returns the path to the root directory
logs_dir = f'{root_directory()}/logs/txt'
# Create a 'logs' directory if it does not exist, with exist_ok=True to avoid FileExistsError
os.makedirs(logs_dir, exist_ok=True)
# Get the current date and time
now = datetime.now()
timestamp_str = now.strftime('%Y-%m-%d_%H-%M')
# Set up the logging level
root_logger = logging.getLogger()
# If handlers are already present, we can disable them.
if root_logger.hasHandlers():
# Clear existing handlers from the root logger
root_logger.handlers.clear()
root_logger.setLevel(logging.INFO)
# Add handler to log messages to a file
log_filename = f'{logs_dir}/{timestamp_str}_{log_prefix}.log'
file_handler = logging.FileHandler(log_filename)
file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
root_logger.addHandler(file_handler)
# Add handler to log messages to the standard output
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
root_logger.addHandler(console_handler)
# Now, any logging.info() call will append the log message to the specified file and the standard output.
logging.info(f'********* {log_prefix} LOGGING STARTED *********')
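# Illustrative usage sketch (not part of the original module; the log prefix is
# hypothetical): set up file and console logging once at the start of a run,
# then log as usual.
def _example_start_logging_usage():
    start_logging("chunk_and_embed")
    logging.info("Pipeline started")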
def timeit(func):
"""
A decorator that logs the time a function takes to execute along with the directory and filename.
Args:
func (callable): The function being decorated.
Returns:
callable: The wrapped function.
"""
@wraps(func)
def wrapper(*args, **kwargs):
"""
The wrapper function to execute the decorated function and log its execution time and location.
Args:
*args: Variable length argument list to pass to the decorated function.
**kwargs: Arbitrary keyword arguments to pass to the decorated function.
Returns:
The value returned by the decorated function.
"""
if os.getenv('ENVIRONMENT') == 'LOCAL':
# Get the current file's path and extract directory and filename
file_path = inspect.getfile(func)
directory, filename = os.path.split(file_path)
dir_name = os.path.basename(directory)
# Log start of function execution
logging.info(f"{dir_name}.{filename}.{func.__name__} STARTED.")
start_time = time.time()
# Call the decorated function and store its result
result = func(*args, **kwargs)
end_time = time.time()
elapsed_time = end_time - start_time
minutes, seconds = divmod(elapsed_time, 60)
# Log end of function execution
logging.info(f"{dir_name}.{filename}.{func.__name__} COMPLETED, took {int(minutes)} minutes and {seconds:.2f} seconds to run.\n")
return result
else:
# If not in 'LOCAL' environment, just call the function without timing
return func(*args, **kwargs)
return wrapper
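# Illustrative usage sketch (not part of the original module): with
# ENVIRONMENT=LOCAL the call is timed and logged, otherwise it runs untimed.
@timeit
def _example_timed_task():
    time.sleep(0.1)  # stand-in for real work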
def authenticate_service_account(service_account_file: str) -> Credentials:
"""Authenticates using service account and returns the session."""
credentials = ServiceAccountCredentials.from_service_account_file(
service_account_file,
scopes=["https://www.googleapis.com/auth/youtube.readonly"]
)
return credentials
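# Illustrative usage sketch (hypothetical file path, not part of the original
# module): load service-account credentials scoped for the YouTube Data API.
def _example_authenticate():
    creds = authenticate_service_account("/path/to/service_account.json")
    return creds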
def get_last_index_embedding_params():
index_dir = f"{root_directory()}/.storage/research_pdf/"
index = sorted(os.listdir(index_dir))[-1].split('_')
index_date = index[0]
embedding_model_name = index[1]
embedding_model_chunk_size = int(index[2])
chunk_overlap = int(index[3])
vector_space_distance_metric = 'cosine' # TODO 2023-11-02: save vector_space_distance_metric in index name
return embedding_model_name, embedding_model_chunk_size, chunk_overlap, vector_space_distance_metric
import os
import fnmatch
import re
def find_matching_files(directory: str):
mp3_files = []
json_txt_files = []
# 1. Recursively walk through the directory and collect paths to all .mp3, .json, and .txt files
for dirpath, _, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, "*.mp3"):
mp3_files.append(os.path.join(dirpath, filename))
for filename in fnmatch.filter(filenames, "*.json"):
json_txt_files.append(os.path.join(dirpath, filename))
for filename in fnmatch.filter(filenames, "*.txt"):
json_txt_files.append(os.path.join(dirpath, filename))
matched_tuples = []
for mp3_file in mp3_files:
mp3_basename = os.path.basename(mp3_file).rsplit('.', 1)[0]
for jt_file in json_txt_files:
jt_basename = os.path.basename(jt_file).rsplit('.', 1)[0]
# Remove prefix date if it exists
jt_basename = re.sub(r'^\d{4}-\d{2}-\d{2}_', '', jt_basename)
# Remove various suffixes
jt_basename = re.sub(r'(_diarized_content(_processed_diarized)?)$', '', jt_basename)
if mp3_basename == jt_basename:
matched_tuples.append((mp3_file, jt_file))
# 3. For each match, print the tuple and then later delete the .mp3 file
for mp3_file, jt_file in matched_tuples:
print((mp3_file, jt_file))
if os.path.exists(mp3_file):
os.remove(mp3_file)
print(f"Deleting {mp3_file}")
import pandas as pd
def find_closest_match(video_title, df_titles):
max_overlap = 0
best_match = None
for title in df_titles:
# Ensure title is a string before iterating
title_str = str(title)
overlap = sum(1 for a, b in zip(video_title, title_str) if a == b)
if overlap > max_overlap:
max_overlap = overlap
best_match = title_str
return best_match
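# Worked example (illustrative, hypothetical titles; not part of the original
# module): the heuristic counts position-wise character matches, so the
# prefix-identical title wins.
def _example_find_closest_match():
    titles = ["MEV on L2s", "L2 MEV recap"]
    best = find_closest_match("MEV on L2", titles)
    assert best == "MEV on L2s"  # 9 matching positions vs. 1
    return best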
def move_remaining_mp3_to_their_subdirs():
# Load the DataFrame
videos_path = f"{root_directory()}/datasets/evaluation_data/youtube_videos.csv"
youtube_videos_df = pd.read_csv(videos_path)
youtube_videos_df['title'] = youtube_videos_df['title'].str.replace(' +', ' ', regex=True)
youtube_videos_df['title'] = youtube_videos_df['title'].str.replace('"', '', regex=True)
# Get a list of all mp3 files in the directory and subdirectories
mp3_files = []
for subdir, dirs, files in os.walk(f"{root_directory()}/datasets/evaluation_data/diarized_youtube_content_2023-10-06"):
for file in files:
if file.endswith(".mp3"):
mp3_files.append(os.path.join(subdir, file))
df_titles = youtube_videos_df['title'].tolist()
# Process each mp3 file
for mp3_file in mp3_files:
# Extract the segment after the last "/"
video_title = mp3_file.split('/')[-1].rsplit('.', 1)[0]
# Replace double spaces with a single space
        video_title = video_title.replace('  ', ' ').strip()
# Check if mp3 file is already in a directory matching its name
containing_dir = os.path.basename(os.path.dirname(mp3_file))
if video_title == containing_dir:
continue
best_match = find_closest_match(video_title, df_titles)
video_row = youtube_videos_df[youtube_videos_df['title'] == best_match]
if not video_row.empty:
published_date = video_row.iloc[0]['published_date']
new_dir_name = f"{published_date}_{video_title}"
new_dir_path = os.path.join(os.path.dirname(mp3_file), new_dir_name)
os.makedirs(new_dir_path, exist_ok=True)
new_file_name = f"{published_date}_{video_title}.mp3"
new_file_path = os.path.join(new_dir_path, new_file_name)
print(f"Moved video {best_match} to {new_file_path}!")
shutil.move(mp3_file, new_file_path)
else:
print(f"No matching video title found in DataFrame for: {video_title}")
def move_remaining_txt_to_their_subdirs():
# Load the DataFrame
videos_path = f"{root_directory()}/datasets/evaluation_data/youtube_videos.csv"
youtube_videos_df = pd.read_csv(videos_path)
youtube_videos_df['title'] = youtube_videos_df['title'].str.replace(' +', ' ', regex=True)
youtube_videos_df['title'] = youtube_videos_df['title'].str.replace('"', '', regex=True)
# Get a list of all txt files in the directory and subdirectories
txt_files = []
for subdir, dirs, files in os.walk(f"{root_directory()}/datasets/evaluation_data/diarized_youtube_content_2023-10-06"):
for file in files:
if file.endswith("_diarized_content_processed_diarized.txt"):
txt_files.append(os.path.join(subdir, file))
df_titles = youtube_videos_df['title'].tolist()
# Process each txt file
for txt_file in txt_files:
# Extract the segment after the last "/"
extension = "_diarized_content_processed_diarized.txt"
video_title = txt_file.replace(extension, '').split('/')[-1].rsplit('.', 1)[0]
# Replace double spaces with a single space
        video_title = video_title.replace('  ', ' ').strip()
# video_row = youtube_videos_df[youtube_videos_df['title'].str.contains(video_title, case=False, na=False, regex=False)]
best_match = find_closest_match(video_title, df_titles)
video_row = youtube_videos_df[youtube_videos_df['title'] == best_match]
if not video_row.empty:
published_date = video_row.iloc[0]['published_date']
new_dir_name = f"{published_date}_{video_title}"
# Check if txt file is already in a directory matching its name
containing_dir = os.path.basename(os.path.dirname(txt_file))
if new_dir_name == containing_dir:
continue
new_dir_path = os.path.join(os.path.dirname(txt_file), new_dir_name)
os.makedirs(new_dir_path, exist_ok=True)
new_file_name = f"{published_date}_{video_title}{extension}"
new_file_path = os.path.join(new_dir_path, new_file_name)
if os.path.exists(new_file_path):
print(f"Deleted {txt_file} because {new_file_path} already exists")
os.remove(txt_file)
else:
print(f"Moved video {txt_file} to {new_file_path}!")
shutil.move(txt_file, new_file_path)
else:
print(f"No matching video title found in DataFrame for: {video_title}")
def move_remaining_json_to_their_subdirs():
# Load the DataFrame
videos_path = f"{root_directory()}/datasets/evaluation_data/youtube_videos.csv"
youtube_videos_df = pd.read_csv(videos_path)
youtube_videos_df['title'] = youtube_videos_df['title'].str.replace(' +', ' ', regex=True)
youtube_videos_df['title'] = youtube_videos_df['title'].str.replace('"', '', regex=True)
# Get a list of all json files in the directory and subdirectories
json_files = []
for subdir, dirs, files in os.walk(f"{root_directory()}/datasets/evaluation_data/diarized_youtube_content_2023-10-06"):
for file in files:
if file.endswith("_diarized_content.json"):
json_files.append(os.path.join(subdir, file))
df_titles = youtube_videos_df['title'].tolist()
# Process each json file
for json_file in json_files:
# Extract the segment after the last "/"
extension = "_diarized_content.json"
video_title = json_file.replace(extension, '').split('/')[-1].rsplit('.', 1)[0]
# Replace double spaces with a single space
        video_title = video_title.replace('  ', ' ').strip()
# video_row = youtube_videos_df[youtube_videos_df['title'].str.contains(video_title, case=False, na=False, regex=False)]
best_match = find_closest_match(video_title, df_titles)
video_row = youtube_videos_df[youtube_videos_df['title'] == best_match]
if not video_row.empty:
published_date = video_row.iloc[0]['published_date']
new_dir_name = f"{published_date}_{video_title}"
# Check if json file is already in a directory matching its name
containing_dir = os.path.basename(os.path.dirname(json_file))
if new_dir_name == containing_dir:
continue
new_dir_path = os.path.join(os.path.dirname(json_file), new_dir_name)
os.makedirs(new_dir_path, exist_ok=True)
new_file_name = f"{published_date}_{video_title}{extension}"
new_file_path = os.path.join(new_dir_path, new_file_name)
if os.path.exists(new_file_path):
print(f"Deleted {json_file} because {new_file_path} already exists")
os.remove(json_file)
else:
print(f"Moved video {json_file} to {new_file_path}!")
shutil.move(json_file, new_file_path)
else:
print(f"No matching video title found in DataFrame for: {video_title}")
def merge_directories(base_path):
'''
This function walks through all subdirectories and merges the contents of directories that have
names differing only by the pipe character used, from fullwidth to ASCII. Files from the fullwidth
pipe directory are moved to the ASCII pipe directory, and if a file with the same name exists, the
file from the fullwidth pipe directory is deleted. After the merge, the fullwidth pipe directory is
deleted if empty.
Args:
base_path: The base directory path to start searching from.
Returns: None
'''
# Helper function to rename the pipe character
def standardize_name(dir_or_file_name):
        return dir_or_file_name.replace('|', '|')  # fullwidth pipe (U+FF5C) -> ASCII pipe
# Track directories to be removed after processing
dirs_to_remove = []
# Walk through the directory structure
for root, dirs, _ in os.walk(base_path):
# Map of standard directory names to their full paths
standard_dirs = {}
# First pass to fill in the mapping
for dir_name in dirs:
standard_dirs[standardize_name(dir_name)] = os.path.join(root, dir_name)
# Second pass to perform the merging
for dir_name in dirs:
standard_name = standardize_name(dir_name)
src = os.path.join(root, dir_name)
dst = standard_dirs[standard_name]
# Only proceed if the directory names actually differ (by the pipe character)
if src != dst:
if not os.path.exists(dst):
# If the destination doesn't exist, simply rename the directory
os.rename(src, dst)
print(f"Renamed {src} to {dst}")
else:
# Merge contents
for item in os.listdir(src):
src_item = os.path.join(src, item)
dst_item = os.path.join(dst, standardize_name(item))
if os.path.exists(dst_item):
# If there is a conflict, delete the source item
os.remove(src_item)
print(f"Deleted due to conflict: {src_item}")
else:
shutil.move(src_item, dst_item)
print(f"Moved {src_item} to {dst_item}")
# Add to list of directories to remove if they are empty
dirs_to_remove.append(src)
# Remove the source directories if they are empty
for dir_to_remove in dirs_to_remove:
if not os.listdir(dir_to_remove):
os.rmdir(dir_to_remove)
print(f"Removed empty directory: {dir_to_remove}")
else:
print(f"Directory {dir_to_remove} is not empty after merge. Please check contents.")
def replace_fullwidth_colon_and_clean():
base_path = f"{root_directory()}/datasets/evaluation_data/diarized_youtube_content_2023-10-06"
for root, dirs, files in os.walk(base_path):
json_files = set()
# First, collect all .json filenames without extension
for file in files:
if file.endswith('.json'):
json_files.add(file[:-5]) # Removes the '.json' part
# Next, iterate over files and process them
for file in files:
original_file_path = os.path.join(root, file)
            if ':' in file:
                # Replace the fullwidth colon with a standard colon
                new_file_name = file.replace(':', ':')  # fullwidth colon (U+FF1A) -> ASCII colon
new_file_path = os.path.join(root, new_file_name)
if os.path.exists(new_file_path):
# If the ASCII version exists, delete the fullwidth version
print(f"Deleted {original_file_path}")
os.remove(original_file_path)
else:
# Otherwise, rename the file
print(f"Renamed {original_file_path} to {new_file_path}")
os.rename(original_file_path, new_file_path)
# If a corresponding .json file exists, delete the .mp3 file
if file[:-4] in json_files and file.endswith('.mp3'):
os.remove(original_file_path)
print(f"Deleted .mp3 file {original_file_path} because a corresponding .json exists")
def fullwidth_to_ascii(char):
"""Converts a full-width character to its ASCII equivalent."""
# Full-width range: 0xFF01-0xFF5E
# Corresponding ASCII range: 0x21-0x7E
fullwidth_offset = 0xFF01 - 0x21
return chr(ord(char) - fullwidth_offset) if 0xFF01 <= ord(char) <= 0xFF5E else char
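# Worked example (illustrative, not part of the original module): code points
# U+FF01-U+FF5E map onto ASCII 0x21-0x7E by subtracting the fixed offset.
def _example_fullwidth_to_ascii():
    assert fullwidth_to_ascii('\uff1a') == ':'  # fullwidth colon
    assert fullwidth_to_ascii('\uff5c') == '|'  # fullwidth pipe
    assert fullwidth_to_ascii('a') == 'a'       # outside the range, unchanged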
def clean_fullwidth_characters(base_path):
for root, dirs, files in os.walk(base_path, topdown=False): # topdown=False to start from the innermost directories
# First handle the files in the directories
for file in files:
new_file_name = ''.join(fullwidth_to_ascii(char) for char in file)
original_file_path = os.path.join(root, file)
new_file_path = os.path.join(root, new_file_name)
if new_file_name != file:
if os.path.exists(new_file_path):
# If the ASCII version exists, delete the full-width version
os.remove(original_file_path)
print(f"Deleted {original_file_path}")
else:
# Otherwise, rename the file
os.rename(original_file_path, new_file_path)
print(f"Renamed {original_file_path} to {new_file_path}")
# Then handle directories
for dir in dirs:
new_dir_name = ''.join(fullwidth_to_ascii(char) for char in dir)
original_dir_path = os.path.join(root, dir)
new_dir_path = os.path.join(root, new_dir_name)
if new_dir_name != dir:
if os.path.exists(new_dir_path):
# If the ASCII version exists, delete the full-width version and its contents
shutil.rmtree(original_dir_path)
print(f"Deleted directory and all contents: {original_dir_path}")
else:
# Otherwise, rename the directory
os.rename(original_dir_path, new_dir_path)
print(f"Renamed {original_dir_path} to {new_dir_path}")
def delete_mp3_if_text_or_json_exists(base_path):
for root, dirs, _ in os.walk(base_path):
for dir in dirs:
subdir_path = os.path.join(root, dir)
# Get a list of files in the current subdirectory
files = os.listdir(subdir_path)
# Filter out .mp3, .txt and .json files
mp3_files = [file for file in files if file.endswith('.mp3')]
txt_json_files = [file for file in files if file.endswith('.txt') or file.endswith('.json')]
if mp3_files:
# If there are both .mp3 and (.txt or .json) files, delete the .mp3 files
if txt_json_files:
for mp3_file in mp3_files:
mp3_file_path = os.path.join(subdir_path, mp3_file)
print(f"Deleted .mp3 file: {mp3_file_path}")
os.remove(mp3_file_path)
else:
# If there are only .mp3 files, print their names and containing directory
for mp3_file in mp3_files:
pass
# print(f".mp3 file without .txt or .json: {mp3_file} in directory {subdir_path}")
def print_frontend_content():
import os
# Define the list of relative paths of the files you want to print
file_paths = [
# f"{root_directory()}/../rag_app_vercel/app/app/api/auth/[...nextauth]/route.ts",
f"{root_directory()}/../rag_app_vercel/app/app/actions.ts",
f"{root_directory()}/../rag_app_vercel/app/app/api/chat/route.ts",
# f"{root_directory()}/../rag_app_vercel/app/chat/[id]/server-logic.ts",
f"{root_directory()}/../rag_app_vercel/app/app/api/chat/[id]/page.tsx",
# f"{root_directory()}/../rag_app_vercel/app/pages/chat.tsx",
# f"{root_directory()}/../rag_app_vercel/app/pages/index.tsx",
f"{root_directory()}/../rag_app_vercel/app/auth.ts",
# f"{root_directory()}/../rag_app_vercel/app/components/chat.tsx",
# f"{root_directory()}/../rag_app_vercel/app/components/chat-list.tsx",
# f"{root_directory()}/../rag_app_vercel/app/components/chat-message.tsx",
# f"{root_directory()}/../rag_app_vercel/app/components/chat-panel.tsx",
# f"{root_directory()}/../rag_app_vercel/app/lib/hooks/use-chat-service.tsx",
]
# file_path = 'app.py'
# print("Here is the content of the app.py backend:")
# with open(file_path, 'r') as file:
# content = file.read()
# print(f"{file_path}\n```\n{content}```\n")
print("\n\nHere is the content of the frontend files:")
# Iterate through the list, printing the content of each file
for file_path in file_paths:
if os.path.isfile(file_path):
with open(file_path, 'r') as file:
content = file.read()
print(f"`{file_path.replace('/home/user/PycharmProjects/rag/../rag_app_vercel/','')}`\n```\n{content}\n```\n\n")
else:
print(f"{file_path}\n```File not found```")
import os
import zipfile
def save_data_into_zip():
def zip_files(directory, file_extension, zip_file):
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith(file_extension):
zip_file.write(os.path.join(root, file), os.path.relpath(os.path.join(root, file), directory))
zip_filename = "collected_documents.zip"
# Create a zip file
with zipfile.ZipFile(zip_filename, 'w') as zipf:
# Add all .pdf files from baseline_evaluation_research_papers_2023-10-05
zip_files(f'{root_directory()}/datasets/evaluation_data/baseline_evaluation_research_papers_2023-10-05', '.pdf', zipf)
# Add all .txt files from nested directories in diarized_youtube_content_2023-10-06
zip_files(f'{root_directory()}/datasets/evaluation_data/diarized_youtube_content_2023-10-06', '.txt', zipf)
print(f"Files zipped into {zip_filename}")
def copy_txt_files_to_transcripts(rootdir=root_directory()):
source_dir = os.path.join(rootdir, 'datasets', 'evaluation_data', 'diarized_youtube_content_2023-10-06')
target_dir = os.path.join(rootdir, 'datasets', 'evaluation_data', 'transcripts')
# Create the target directory if it doesn't exist
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# Copy all .txt files from nested subdirectories
for root, dirs, files in os.walk(source_dir):
for file in files:
if file.endswith('.txt'):
source_file = os.path.join(root, file)
shutil.copy(source_file, target_dir)
print(f"All .txt files copied to {target_dir}")
def process_messages(data):
try:
messages = data["chat_history"]
except KeyError:
# Handle the absence of chat_history key more gracefully
return None
chat_messages = []
for message in messages:
# Create a ChatMessage object for each message
chat_message = ChatMessage(
role=MessageRole(message.get("role", "user").lower()), # Convert the role to Enum
content=message.get("content", ""),
additional_kwargs=message.get("additional_kwargs", {}) # Assuming additional_kwargs is part of your message structure
)
chat_messages.append(chat_message)
return chat_messages
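# Illustrative input shape (assumed, not part of the original module): a dict
# with a "chat_history" list of role/content entries; returns ChatMessage
# objects, or None when the key is absent.
def _example_process_messages():
    data = {"chat_history": [
        {"role": "user", "content": "What is MEV?"},
        {"role": "assistant", "content": "Maximal extractable value is ..."},
    ]}
    return process_messages(data)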
def delete_redundant_directories(root_path):
# Create a list to collect directories to be deleted
directories_to_delete = []
# Walk through the directory
for subdir, dirs, files in os.walk(root_path, topdown=False): # Note the 'topdown=False' parameter
for dir in dirs:
# Construct the path to the current directory
current_dir_path = os.path.join(subdir, dir)
# Check if directory name ends with the specified suffixes
if dir.endswith('_diarized_content') or dir.endswith('_diarized_content_processed_diarized'):
# Construct the file names that should exist in the parent directory
json_file = dir.split('_', 1)[-1] + '_diarized_content.json'
txt_file = dir.split('_', 1)[-1] + '_diarized_content_processed_diarized.txt'
# Construct the paths to the files that should exist
json_file_path = os.path.join(subdir, json_file)
txt_file_path = os.path.join(subdir, txt_file)
# Check if both files exist
if os.path.exists(json_file_path) and os.path.exists(txt_file_path):
# If both files exist, add the redundant directory to the list
print(f"{current_dir_path} is to be deleted")
directories_to_delete.append(current_dir_path)
# Delete the collected directories
for dir_path in directories_to_delete:
shutil.rmtree(dir_path)
print(f"Deleted redundant directory: {dir_path}")
def clean_mp3_dirs(directory):
clean_fullwidth_characters(directory)
move_remaining_mp3_to_their_subdirs()
merge_directories(directory)
delete_mp3_if_text_or_json_exists(directory)
import os
import shutil
def del_wrong_subdirs(root_dir):
# Define the expected maximum directory depth
expected_max_depth = 10 # Based on home/user/PycharmProjects/rag/datasets/evaluation_data/diarized_youtube_content_2023-10-06/<channel_name>/<release_date>_<video_title>/
for subdir, dirs, files in os.walk(root_dir, topdown=False):
# Split the path to evaluate its depth
path_parts = subdir.split(os.sep)
# Check if the directory name contains '_diarized_content' or '_diarized_content_processed_diarized'
if '_diarized_content' in subdir or '_diarized_content_processed_diarized' in subdir:
# Delete the directory and its content
# print(f"Removed directory and its content: {subdir}")
shutil.rmtree(subdir)
elif len(path_parts) > expected_max_depth:
# Delete the directory and its content if it exceeds the maximum depth
print(f"Removed directory and its content: {subdir}")
shutil.rmtree(subdir)
def merge_csv_files_remove_duplicates_and_save(csv_directory=f"{root_directory()}/../mev.fyi/data/links/articles", output_csv_path=f"{root_directory()}/../mev.fyi/data/links/merged_articles.csv"):
"""
Concatenates all CSV files in the given directory, removes duplicates based on the 'Link' column,
and saves the resulting DataFrame to the specified output path.
Args:
csv_directory (str): Directory containing CSV files to merge.
output_csv_path (str): Path to save the merged and deduplicated CSV file.
"""
# List all CSV files in the directory
csv_files = [os.path.join(csv_directory, f) for f in os.listdir(csv_directory) if f.endswith('.csv')]
df_list = []
# Load and concatenate all CSV files
for csv_file in csv_files:
df = pd.read_csv(csv_file)
df_list.append(df)
if df_list:
merged_df = pd.concat(df_list, ignore_index=True)
# Remove duplicates based on 'Link' column
deduplicated_df = merged_df.drop_duplicates(subset=['Link'])
# Save the resulting DataFrame to CSV
deduplicated_df.to_csv(output_csv_path, index=False)
logging.info(f"Merged and deduplicated CSV saved to: {output_csv_path}")
else:
logging.warning("No CSV files found in the provided directory.")
def clean_and_save_config(source_file_path, destination_file_path):
# Regular expressions to match imports and function definitions
import_re = re.compile(r'^\s*(from|import)\s+')
skip_line_re = re.compile(r'^(.*def |.*@|\s*with\s+|\s*for\s+|\s*if\s+|\s*try:|\s*except\s+|\s*lambda|\s*=\s*|\s*return)')
# Matches lines containing specific function keys in the dictionary
function_key_re = re.compile(r'.*:\s*(partial|lambda).*|\s*\'(html_parser|crawl_func|fetch_sidebar_func)\':\s*')
cleaned_lines = []
dict_nesting_level = 0
with open(source_file_path, 'r') as file:
in_site_configs = False
for line in file:
# Check if the line is the start of the site_configs dictionary
if 'site_configs = {' in line:
in_site_configs = True
dict_nesting_level = 1 # Starting the dictionary increases the nesting level
cleaned_lines.append(line)
continue
if in_site_configs:
# Increase or decrease dict_nesting_level based on the braces
dict_nesting_level += line.count('{') - line.count('}')
# If dict_nesting_level drops to 0, we've reached the end of the dictionary
if dict_nesting_level == 0:
cleaned_lines.append(line) # Include the line with the closing brace
break # Exit the loop as we've copied the entire dictionary
# Skip lines based on patterns (import, def, function calls, and specific keys)
if not (import_re.match(line) or skip_line_re.match(line) or function_key_re.match(line)):
cleaned_lines.append(line)
# Write the cleaned content to the destination file
with open(destination_file_path, 'w') as file:
file.writelines(cleaned_lines)
def copy_files_with_tree(source_dir, destination_dir, file_extension='.pdf'):
import shutil
for root, dirs, files in os.walk(source_dir):
# Constructing destination directory path based on the current root path
relative_path = os.path.relpath(root, source_dir)
current_destination_dir = os.path.join(destination_dir, relative_path)
# Ensure the destination directory exists
os.makedirs(current_destination_dir, exist_ok=True)
# Iterate through files in the current directory
for file_name in files:
if file_name.lower().endswith(file_extension):
source_file_path = os.path.join(root, file_name)
destination_file_path = os.path.join(current_destination_dir, file_name)
try:
shutil.copy(source_file_path, destination_file_path)
# print(f"Copied: {source_file_path} to {destination_file_path}")
except IOError as e:
print(f"Unable to copy file. {e}")
except Exception as e:
print(f"Unexpected error: {e}")
def process_and_copy_csv(csv_source_dir, destination_dir):
import os
import shutil
import csv
import json
csv_file_path = os.path.join(csv_source_dir, "docs_details.csv")
json_output_path = os.path.join(destination_dir, "docs_mapping.json")
# Copy the CSV file to the destination directory
shutil.copy(csv_file_path, destination_dir)
url_to_docname_mapping = {}
with open(csv_file_path, mode='r', encoding='utf-8') as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
pdf_link = row['pdf_link'].strip() # Ensure no trailing whitespace
document_name = row['document_name'].strip().replace('.pdf', '.png') # Replace .pdf with .png
url_to_docname_mapping[pdf_link] = document_name
# Log the mapping for verification
print("URL to Document Name Mapping:", url_to_docname_mapping)
with open(json_output_path, mode='w', encoding='utf-8') as json_file:
json.dump(url_to_docname_mapping, json_file, indent=4) # Pretty print for easier manual verification
print(f"CSV copied and mapping saved to {json_output_path}")
def copy_and_verify_files():
# Define the root directory for PycharmProjects
pycharm_projects_dir = f"{root_directory()}/../"
# Define the source directories
csv_source_dir = os.path.join(pycharm_projects_dir, "mev.fyi/data/")
articles_pdf_source_dir = os.path.join(pycharm_projects_dir, "mev.fyi/data/articles_pdf_download/")
articles_pdf_discourse_dir = os.path.join(articles_pdf_source_dir, "all_discourse_topics/")
articles_thumbnails_source_dir = os.path.join(pycharm_projects_dir, "mev.fyi/data/article_thumbnails/")
research_paper_thumbnails_source_dir = os.path.join(pycharm_projects_dir, "mev.fyi/data/research_papers_pdf_thumbnails/")
papers_pdf_source_dir = os.path.join(pycharm_projects_dir, "mev.fyi/data/papers_pdf_downloads/")
ethglobal_docs_dir = os.path.join(pycharm_projects_dir, "mev.fyi/data/ethglobal_hackathon/")
# Define the destination directories
csv_destination_dir = os.path.join(pycharm_projects_dir, "rag/datasets/evaluation_data/")
articles_pdf_destination_dir = os.path.join(pycharm_projects_dir, "rag/datasets/evaluation_data/articles_2023-12-05/")
articles_discourse_destination_dir = os.path.join(pycharm_projects_dir, "rag/datasets/evaluation_data/articles_discourse_2024_03_01/")
articles_thumbnails_destination_dir = os.path.join(pycharm_projects_dir, "rag_app_vercel/app/public/research_paper_thumbnails/")
papers_pdf_thumbnails_destination_dir = os.path.join(pycharm_projects_dir, "rag_app_vercel/app/public/research_paper_thumbnails/")
papers_pdf_destination_dir = os.path.join(pycharm_projects_dir, "rag/datasets/evaluation_data/baseline_evaluation_research_papers_2023-11-21/")
ethglobal_docs_destination_dir = os.path.join(pycharm_projects_dir, "rag/datasets/evaluation_data/ethglobal_docs_2024-03-16/")
# List of CSV files to copy
csv_files_to_copy_from_mevfyi_to_rag = [
"paper_details.csv",
"links/articles_updated.csv",
"links/merged_articles.csv",
"links/youtube/youtube_videos.csv",
"links/youtube/youtube_channel_handles.txt",
"docs_details.csv",
]
clean_and_save_config(source_file_path=f"{csv_source_dir}../src/populate_csv_files/get_article_content/ethglobal_hackathon/site_configs.py",
destination_file_path=f"{csv_destination_dir}site_configs.py")
csv_files_to_copy_from_rag_to_mevfyi = [
# "docs_details.csv",
]
# Create the destination directories if they do not exist
os.makedirs(csv_destination_dir, exist_ok=True)
os.makedirs(articles_pdf_destination_dir, exist_ok=True)
os.makedirs(papers_pdf_destination_dir, exist_ok=True)
os.makedirs(articles_thumbnails_destination_dir, exist_ok=True)
os.makedirs(articles_discourse_destination_dir, exist_ok=True) # Ensure the discourse articles destination directory exists
# Copy and verify CSV files
for file_name in csv_files_to_copy_from_mevfyi_to_rag: # from mev.fyi data repo to rag repo
source_file = os.path.join(csv_source_dir, file_name)
destination_file = os.path.join(csv_destination_dir, file_name.split('/')[-1]) # Get the last part if there's a path included
copy_and_verify(source_file, destination_file)
for file_name in csv_files_to_copy_from_rag_to_mevfyi: # from RAG repo to mevfyi data repo, quite hacky
source_file = os.path.join(csv_destination_dir, file_name)
destination_file = os.path.join(csv_source_dir, file_name.split('/')[-1]) # Get the last part if there's a path included
copy_and_verify(source_file, destination_file)
# Copy PDF files without size verification
copy_all_files(articles_pdf_source_dir, articles_pdf_destination_dir)
copy_all_files(papers_pdf_source_dir, papers_pdf_destination_dir)
process_and_copy_csv(csv_source_dir, f"{root_directory()}/../rag_app_vercel/app/public/")
copy_files_with_tree(articles_thumbnails_source_dir, articles_thumbnails_destination_dir, file_extension='.png')
copy_files_with_tree(research_paper_thumbnails_source_dir, papers_pdf_thumbnails_destination_dir, file_extension='.png')
# New: Copy and rename articles from discourse subdirectories
for subdir, dirs, files in os.walk(articles_pdf_discourse_dir):
for file_name in files:
if file_name.lower().endswith('.pdf'):
source_file = os.path.join(subdir, file_name)
destination_file = os.path.join(articles_discourse_destination_dir, file_name)
try:
shutil.copy(source_file, destination_file)
print(f"Copied: {source_file} to {destination_file}")
except Exception as e:
print(f"Error copying {file_name} from discourse topics: {e}")
# Copy ethglobal docs in rag
if os.path.exists(ethglobal_docs_destination_dir):
shutil.rmtree(ethglobal_docs_destination_dir) # Removes the entire directory tree
# Now use copytree to copy everything from the source to the destination directory.
shutil.copytree(ethglobal_docs_dir, ethglobal_docs_destination_dir)
copy_and_rename_website_docs_pdfs()
print("File copying completed.")
def copy_and_verify(source_file, destination_file):
try:
# Verify file size before copying
if os.path.exists(destination_file):
source_size = os.path.getsize(source_file)
destination_size = os.path.getsize(destination_file)
if destination_size > source_size:
# raise ValueError(f"File {os.path.basename(source_file)} in destination is larger than the source. Copy aborted.")
print(f"/!\File {os.path.basename(source_file)} in destination is larger than the source. Copy aborted.")
return
shutil.copy(source_file, destination_file)
# print(f"Copied: {source_file} to {destination_file}")
except IOError as e:
print(f"Unable to copy file. {e}")
except ValueError as e:
print(e)
# Stop the process if size condition is not met
except Exception as e:
print(f"Unexpected error: {e}")
def copy_all_files(source_dir, destination_dir, file_extension='.pdf'):
for file_name in os.listdir(source_dir):
if file_name.lower().endswith(file_extension): # Ensuring it is a PDF file
source_file = os.path.join(source_dir, file_name)
destination_file = os.path.join(destination_dir, file_name)
try:
shutil.copy(source_file, destination_file)
# print(f"Copied: {source_file} to {destination_file}")
except IOError as e:
print(f"Unable to copy file. {e}")
except Exception as e:
print(f"Unexpected error: {e}")
def copy_and_rename_website_docs_pdfs():
root_dir = root_directory()
source_directories = {
f'{root_dir}/../mev.fyi/data/flashbots_docs_pdf': f'{root_dir}/datasets/evaluation_data/flashbots_docs_2024_01_07',
f'{root_dir}/../mev.fyi/data/suave_docs_pdf': f'{root_dir}/datasets/evaluation_data/suave_docs_2024_03_13',
f'{root_dir}/../mev.fyi/data/ethereum_org_website_content': f'{root_dir}/datasets/evaluation_data/ethereum_org_content_2024_01_07'
}
for source_root, target_root in source_directories.items():
# Ensure the target directory exists
os.makedirs(target_root, exist_ok=True)
# Walk through the source directory
for root, dirs, files in os.walk(source_root):
for file in files:
if file.endswith(('.pdf', '.pdfx')):
# Construct the relative path
relative_path = os.path.relpath(root, source_root)
# Replace directory separators with '-' and remove leading directory name if present
leading_dir_name = os.path.basename(source_root) + '-'
relative_path = relative_path.replace(os.path.sep, '-')
if relative_path == '.':
new_filename = file
elif relative_path.startswith(leading_dir_name):
new_filename = relative_path[len(leading_dir_name):] + '-' + file
else:
new_filename = relative_path + '-' + file
# Change the file extension from .pdfx to .pdf if necessary
if new_filename.endswith('.pdfx'):
new_filename = new_filename[:-1]
# Construct the full source and target paths
source_file = os.path.join(root, file)
target_file = os.path.join(target_root, new_filename)
# Copy the file
shutil.copy2(source_file, target_file)
print(f"Copied and renamed {source_file.split('/')[-1]} to {target_file.split('/')[-1]}")
def save_successful_load_to_csv(documents_details, csv_filename='docs.csv', fieldnames=['title', 'authors', 'pdf_link', 'release_date', 'document_name']):
# Define the directory where you want to save the successful loads CSV
from src.Llama_index_sandbox import output_dir
# Create the directory if it doesn't exist
Path(output_dir).mkdir(parents=True, exist_ok=True)
csv_path = os.path.join(output_dir, csv_filename)
file_exists = os.path.isfile(csv_path)
if isinstance(documents_details, dict):
# Filter documents_details for only the fields in fieldnames
filtered_documents_details = {field: documents_details[field] for field in fieldnames}
else:
filtered_documents_details = {field: documents_details.extra_info[field] for field in fieldnames}
with open(csv_path, 'a', newline='', encoding='utf-8') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
# Write header only once if the file does not exist
if not file_exists:
writer.writeheader()
# Write the filtered document details to the CSV
writer.writerow(filtered_documents_details)
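# Hedged usage sketch (added for illustration, not part of the original module): how
# save_successful_load_to_csv might be called with a plain dict. The metadata values
# below are invented placeholders, not real entries from the mev.fyi dataset.
def _example_save_successful_load():
    save_successful_load_to_csv({
        'title': 'Example paper',
        'authors': 'A. Author',
        'pdf_link': 'https://example.org/paper.pdf',
        'release_date': '2024-01-01',
        'document_name': 'example_paper.pdf',
    })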
def get_embedding_model(embedding_model_name):
if embedding_model_name == "text-embedding-ada-002":
# embedding_model = OpenAIEmbedding(disallowed_special=())
embedding_model = OpenAIEmbedding() # https://github.com/langchain-ai/langchain/issues/923 encountered the same issue (2023-11-22)
else:
embedding_model = HuggingFaceEmbedding(
model_name=embedding_model_name,
# device='cuda'
)
# else:
# assert False, f"The embedding model is not supported: [{embedding_model_name}]"
return embedding_model
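# Hedged usage sketch (illustration only): selecting either the OpenAI embedding model or
# a local HuggingFace model through get_embedding_model. The HuggingFace model name below
# is an arbitrary example, not one prescribed by this codebase.
def _example_select_embedding_model(use_openai: bool = True):
    if use_openai:
        return get_embedding_model("text-embedding-ada-002")
    return get_embedding_model("BAAI/bge-small-en-v1.5")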
def load_csv_data(file_path):
if os.path.exists(file_path):
return pd.read_csv(file_path)
else:
logging.warning(f"CSV file not found at path: {file_path}")
return pd.DataFrame() # Return an empty DataFrame if file doesn't exist
@timeit
def compute_new_entries(latest_df: pd.DataFrame, current_df: pd.DataFrame, left_key='pdf_link', right_key='pdf_link', overwrite=False) -> pd.DataFrame:
"""
Compute the difference between latest_df and research_papers,
returning a DataFrame with entries not yet in research_papers.
Parameters:
- latest_df (pd.DataFrame): DataFrame loaded from latest_df.csv
- current_df (pd.DataFrame): DataFrame loaded from current_df.csv
Returns:
- pd.DataFrame: DataFrame with entries not yet in research_papers.csv
"""
# Assuming there's a unique identifier column named 'id' in both DataFrames
# Adjust 'id' to the column name you use as a unique identifier
if overwrite:
logging.info(f"New to be added to the database found: [{len(latest_df)}]")
return latest_df
else:
new_entries_df = latest_df[~latest_df[left_key].isin(current_df[right_key])]
logging.info(f"New to be added to the database found: [{len(new_entries_df)}]")
return new_entries_df
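# Hedged usage sketch (illustration only): diff the freshly scraped paper list against what
# is already stored and keep only unseen rows. Both CSV paths are assumptions made up for
# the example; substitute the paths your pipeline actually writes.
def _example_compute_new_entries():
    latest_df = load_csv_data(f"{root_directory()}/data/latest_papers.csv")
    current_df = load_csv_data(f"{root_directory()}/pipeline_storage/docs.csv")
    return compute_new_entries(latest_df, current_df, left_key='pdf_link', right_key='pdf_link')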
def load_vector_store_from_pinecone_database(delete_old_index=False, new_index=False, index_name=os.environ.get("PINECONE_INDEX_NAME", "mevfyi-cosine")):
pc = Pinecone(
api_key=os.environ.get("PINECONE_API_KEY")
)
if new_index:
        if delete_old_index:
            logging.warning(f"Deleting the existing index [{index_name}] before recreating it.")
pc.delete_index(index_name)
# Dimensions are for text-embedding-ada-002
from pinecone import ServerlessSpec
pc.create_index(
name=index_name,
dimension=1536,
metric="cosine",
spec=ServerlessSpec(cloud="aws", region="us-west-2"),
)
pinecone_index = pc.Index(index_name)
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
return vector_store
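# Hedged usage sketch (illustration only): connect to the existing Pinecone index without
# recreating it. Requires PINECONE_API_KEY (and optionally PINECONE_INDEX_NAME) in the
# environment; no index is deleted or created with these arguments.
def _example_connect_to_pinecone():
    return load_vector_store_from_pinecone_database(delete_old_index=False, new_index=False)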
def load_vector_store_from_pinecone_database_legacy(index_name=os.environ.get("PINECONE_INDEX_NAME", "mevfyi-cosine")):
pc = Pinecone(
api_key=os.environ.get("PINECONE_API_KEY")
)
pinecone_index = pc.Index(index_name)
# from llama_index.legacy.vector_stores import PineconeVectorStore
import llama_index.legacy.vector_stores as legacy_vector_stores
vector_store = legacy_vector_stores.PineconeVectorStore(pinecone_index=pinecone_index)
return vector_store
def save_metadata_to_pipeline_dir(all_metadata, root_dir, dir='pipeline_storage/docs.csv', drop_key='pdf_link'):
# Save to CSV
df = pd.DataFrame(all_metadata)
csv_path = os.path.join(root_dir, dir)
if os.path.exists(csv_path):
existing_df = pd.read_csv(csv_path)
combined_df = pd.concat([existing_df, df]).drop_duplicates(subset=[drop_key])
else:
combined_df = df
combined_df.to_csv(csv_path, index=False)
logging.info(f"Metadata with # of unique videps [{combined_df.shape[0]}] saved to [{csv_path}]")
if __name__ == '__main__':
pass
copy_and_verify_files()
# copy_and_rename_website_docs_pdfs()
# directory = f"{root_directory()}/datasets/evaluation_data/diarized_youtube_content_2023-10-06"
# clean_fullwidth_characters(directory)
# move_remaining_mp3_to_their_subdirs()
# merge_directories(directory)
# delete_mp3_if_text_or_json_exists(directory)
# directory = f"{root_directory()}/datasets/evaluation_data/diarized_youtube_content_2023-10-06"
# pdf_dir = f"{root_directory()}/datasets/evaluation_data/baseline_evaluation_research_papers_2023-10-05"
# # clean_mp3_dirs(directory=directory)
# del_wrong_subdirs(directory)
# move_remaining_txt_to_their_subdirs()
# move_remaining_json_to_their_subdirs()
# print_frontend_content()
# delete_mp3_if_text_or_json_exists(directory)
# save_data_into_zip()
# copy_txt_files_to_transcripts() | [
"llama_index.legacy.OpenAIEmbedding",
"llama_index.legacy.vector_stores.PineconeVectorStore",
"llama_index.legacy.embeddings.HuggingFaceEmbedding",
"llama_index.vector_stores.pinecone.PineconeVectorStore"
] | [((983, 1012), 'os.path.exists', 'os.path.exists', (['"""/.dockerenv"""'], {}), "('/.dockerenv')\n", (997, 1012), False, 'import os\n'), ((1787, 1798), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1796, 1798), False, 'import os\n'), ((1810, 1833), 'os.path.abspath', 'os.path.abspath', (['os.sep'], {}), '(os.sep)\n', (1825, 1833), False, 'import os\n'), ((3505, 3541), 'os.makedirs', 'os.makedirs', (['logs_dir'], {'exist_ok': '(True)'}), '(logs_dir, exist_ok=True)\n', (3516, 3541), False, 'import os\n'), ((3589, 3603), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3601, 3603), False, 'from datetime import datetime\n'), ((3705, 3724), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3722, 3724), False, 'import logging\n'), ((4082, 4115), 'logging.FileHandler', 'logging.FileHandler', (['log_filename'], {}), '(log_filename)\n', (4101, 4115), False, 'import logging\n'), ((4331, 4354), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (4352, 4354), False, 'import logging\n'), ((4611, 4676), 'logging.info', 'logging.info', (['f"""********* {log_prefix} LOGGING STARTED *********"""'], {}), "(f'********* {log_prefix} LOGGING STARTED *********')\n", (4623, 4676), False, 'import logging\n'), ((4941, 4952), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (4946, 4952), False, 'from functools import wraps\n'), ((6604, 6742), 'google.oauth2.service_account.Credentials.from_service_account_file', 'ServiceAccountCredentials.from_service_account_file', (['service_account_file'], {'scopes': "['https://www.googleapis.com/auth/youtube.readonly']"}), "(service_account_file,\n scopes=['https://www.googleapis.com/auth/youtube.readonly'])\n", (6655, 6742), True, 'from google.oauth2.service_account import Credentials as ServiceAccountCredentials\n'), ((7560, 7578), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (7567, 7578), False, 'import os\n'), ((9423, 9447), 'pandas.read_csv', 'pd.read_csv', (['videos_path'], {}), '(videos_path)\n', (9434, 9447), True, 'import pandas as pd\n'), ((11485, 11509), 'pandas.read_csv', 'pd.read_csv', (['videos_path'], {}), '(videos_path)\n', (11496, 11509), True, 'import pandas as pd\n'), ((14014, 14038), 'pandas.read_csv', 'pd.read_csv', (['videos_path'], {}), '(videos_path)\n', (14025, 14038), True, 'import pandas as pd\n'), ((17218, 17236), 'os.walk', 'os.walk', (['base_path'], {}), '(base_path)\n', (17225, 17236), False, 'import os\n'), ((19351, 19369), 'os.walk', 'os.walk', (['base_path'], {}), '(base_path)\n', (19358, 19369), False, 'import os\n'), ((21120, 21153), 'os.walk', 'os.walk', (['base_path'], {'topdown': '(False)'}), '(base_path, topdown=False)\n', (21127, 21153), False, 'import os\n'), ((22851, 22869), 'os.walk', 'os.walk', (['base_path'], {}), '(base_path)\n', (22858, 22869), False, 'import os\n'), ((26845, 26940), 'os.path.join', 'os.path.join', (['rootdir', '"""datasets"""', '"""evaluation_data"""', '"""diarized_youtube_content_2023-10-06"""'], {}), "(rootdir, 'datasets', 'evaluation_data',\n 'diarized_youtube_content_2023-10-06')\n", (26857, 26940), False, 'import os\n'), ((26954, 27021), 'os.path.join', 'os.path.join', (['rootdir', '"""datasets"""', '"""evaluation_data"""', '"""transcripts"""'], {}), "(rootdir, 'datasets', 'evaluation_data', 'transcripts')\n", (26966, 27021), False, 'import os\n'), ((27231, 27250), 'os.walk', 'os.walk', (['source_dir'], {}), '(source_dir)\n', (27238, 27250), False, 'import os\n'), ((28360, 28393), 'os.walk', 'os.walk', (['root_path'], {'topdown': '(False)'}), 
'(root_path, topdown=False)\n', (28367, 28393), False, 'import os\n'), ((30238, 30270), 'os.walk', 'os.walk', (['root_dir'], {'topdown': '(False)'}), '(root_dir, topdown=False)\n', (30245, 30270), False, 'import os\n'), ((32423, 32459), 're.compile', 're.compile', (['"""^\\\\s*(from|import)\\\\s+"""'], {}), "('^\\\\s*(from|import)\\\\s+')\n", (32433, 32459), False, 'import re\n'), ((32478, 32607), 're.compile', 're.compile', (['"""^(.*def |.*@|\\\\s*with\\\\s+|\\\\s*for\\\\s+|\\\\s*if\\\\s+|\\\\s*try:|\\\\s*except\\\\s+|\\\\s*lambda|\\\\s*=\\\\s*|\\\\s*return)"""'], {}), "(\n '^(.*def |.*@|\\\\s*with\\\\s+|\\\\s*for\\\\s+|\\\\s*if\\\\s+|\\\\s*try:|\\\\s*except\\\\s+|\\\\s*lambda|\\\\s*=\\\\s*|\\\\s*return)'\n )\n", (32488, 32607), False, 'import re\n'), ((32680, 32788), 're.compile', 're.compile', (['""".*:\\\\s*(partial|lambda).*|\\\\s*\\\\\'(html_parser|crawl_func|fetch_sidebar_func)\\\\\':\\\\s*"""'], {}), '(\n ".*:\\\\s*(partial|lambda).*|\\\\s*\\\\\'(html_parser|crawl_func|fetch_sidebar_func)\\\\\':\\\\s*"\n )\n', (32690, 32788), False, 'import re\n'), ((34267, 34286), 'os.walk', 'os.walk', (['source_dir'], {}), '(source_dir)\n', (34274, 34286), False, 'import os\n'), ((35429, 35477), 'os.path.join', 'os.path.join', (['csv_source_dir', '"""docs_details.csv"""'], {}), "(csv_source_dir, 'docs_details.csv')\n", (35441, 35477), False, 'import os\n'), ((35501, 35551), 'os.path.join', 'os.path.join', (['destination_dir', '"""docs_mapping.json"""'], {}), "(destination_dir, 'docs_mapping.json')\n", (35513, 35551), False, 'import os\n'), ((35610, 35653), 'shutil.copy', 'shutil.copy', (['csv_file_path', 'destination_dir'], {}), '(csv_file_path, destination_dir)\n', (35621, 35653), False, 'import shutil\n'), ((36636, 36687), 'os.path.join', 'os.path.join', (['pycharm_projects_dir', '"""mev.fyi/data/"""'], {}), "(pycharm_projects_dir, 'mev.fyi/data/')\n", (36648, 36687), False, 'import os\n'), ((36718, 36791), 'os.path.join', 'os.path.join', (['pycharm_projects_dir', '"""mev.fyi/data/articles_pdf_download/"""'], {}), "(pycharm_projects_dir, 'mev.fyi/data/articles_pdf_download/')\n", (36730, 36791), False, 'import os\n'), ((36825, 36887), 'os.path.join', 'os.path.join', (['articles_pdf_source_dir', '"""all_discourse_topics/"""'], {}), "(articles_pdf_source_dir, 'all_discourse_topics/')\n", (36837, 36887), False, 'import os\n'), ((36925, 36995), 'os.path.join', 'os.path.join', (['pycharm_projects_dir', '"""mev.fyi/data/article_thumbnails/"""'], {}), "(pycharm_projects_dir, 'mev.fyi/data/article_thumbnails/')\n", (36937, 36995), False, 'import os\n'), ((37039, 37125), 'os.path.join', 'os.path.join', (['pycharm_projects_dir', '"""mev.fyi/data/research_papers_pdf_thumbnails/"""'], {}), "(pycharm_projects_dir,\n 'mev.fyi/data/research_papers_pdf_thumbnails/')\n", (37051, 37125), False, 'import os\n'), ((37150, 37222), 'os.path.join', 'os.path.join', (['pycharm_projects_dir', '"""mev.fyi/data/papers_pdf_downloads/"""'], {}), "(pycharm_projects_dir, 'mev.fyi/data/papers_pdf_downloads/')\n", (37162, 37222), False, 'import os\n'), ((37248, 37319), 'os.path.join', 'os.path.join', (['pycharm_projects_dir', '"""mev.fyi/data/ethglobal_hackathon/"""'], {}), "(pycharm_projects_dir, 'mev.fyi/data/ethglobal_hackathon/')\n", (37260, 37319), False, 'import os\n'), ((37388, 37455), 'os.path.join', 'os.path.join', (['pycharm_projects_dir', '"""rag/datasets/evaluation_data/"""'], {}), "(pycharm_projects_dir, 'rag/datasets/evaluation_data/')\n", (37400, 37455), False, 'import os\n'), ((37491, 37582), 'os.path.join', 
'os.path.join', (['pycharm_projects_dir', '"""rag/datasets/evaluation_data/articles_2023-12-05/"""'], {}), "(pycharm_projects_dir,\n 'rag/datasets/evaluation_data/articles_2023-12-05/')\n", (37503, 37582), False, 'import os\n'), ((37620, 37721), 'os.path.join', 'os.path.join', (['pycharm_projects_dir', '"""rag/datasets/evaluation_data/articles_discourse_2024_03_01/"""'], {}), "(pycharm_projects_dir,\n 'rag/datasets/evaluation_data/articles_discourse_2024_03_01/')\n", (37632, 37721), False, 'import os\n'), ((37760, 37854), 'os.path.join', 'os.path.join', (['pycharm_projects_dir', '"""rag_app_vercel/app/public/research_paper_thumbnails/"""'], {}), "(pycharm_projects_dir,\n 'rag_app_vercel/app/public/research_paper_thumbnails/')\n", (37772, 37854), False, 'import os\n'), ((37895, 37989), 'os.path.join', 'os.path.join', (['pycharm_projects_dir', '"""rag_app_vercel/app/public/research_paper_thumbnails/"""'], {}), "(pycharm_projects_dir,\n 'rag_app_vercel/app/public/research_paper_thumbnails/')\n", (37907, 37989), False, 'import os\n'), ((38019, 38142), 'os.path.join', 'os.path.join', (['pycharm_projects_dir', '"""rag/datasets/evaluation_data/baseline_evaluation_research_papers_2023-11-21/"""'], {}), "(pycharm_projects_dir,\n 'rag/datasets/evaluation_data/baseline_evaluation_research_papers_2023-11-21/'\n )\n", (38031, 38142), False, 'import os\n'), ((38171, 38268), 'os.path.join', 'os.path.join', (['pycharm_projects_dir', '"""rag/datasets/evaluation_data/ethglobal_docs_2024-03-16/"""'], {}), "(pycharm_projects_dir,\n 'rag/datasets/evaluation_data/ethglobal_docs_2024-03-16/')\n", (38183, 38268), False, 'import os\n'), ((38969, 39016), 'os.makedirs', 'os.makedirs', (['csv_destination_dir'], {'exist_ok': '(True)'}), '(csv_destination_dir, exist_ok=True)\n', (38980, 39016), False, 'import os\n'), ((39021, 39077), 'os.makedirs', 'os.makedirs', (['articles_pdf_destination_dir'], {'exist_ok': '(True)'}), '(articles_pdf_destination_dir, exist_ok=True)\n', (39032, 39077), False, 'import os\n'), ((39082, 39136), 'os.makedirs', 'os.makedirs', (['papers_pdf_destination_dir'], {'exist_ok': '(True)'}), '(papers_pdf_destination_dir, exist_ok=True)\n', (39093, 39136), False, 'import os\n'), ((39141, 39204), 'os.makedirs', 'os.makedirs', (['articles_thumbnails_destination_dir'], {'exist_ok': '(True)'}), '(articles_thumbnails_destination_dir, exist_ok=True)\n', (39152, 39204), False, 'import os\n'), ((39209, 39271), 'os.makedirs', 'os.makedirs', (['articles_discourse_destination_dir'], {'exist_ok': '(True)'}), '(articles_discourse_destination_dir, exist_ok=True)\n', (39220, 39271), False, 'import os\n'), ((40704, 40739), 'os.walk', 'os.walk', (['articles_pdf_discourse_dir'], {}), '(articles_pdf_discourse_dir)\n', (40711, 40739), False, 'import os\n'), ((41302, 41348), 'os.path.exists', 'os.path.exists', (['ethglobal_docs_destination_dir'], {}), '(ethglobal_docs_destination_dir)\n', (41316, 41348), False, 'import os\n'), ((41534, 41601), 'shutil.copytree', 'shutil.copytree', (['ethglobal_docs_dir', 'ethglobal_docs_destination_dir'], {}), '(ethglobal_docs_dir, ethglobal_docs_destination_dir)\n', (41549, 41601), False, 'import shutil\n'), ((42722, 42744), 'os.listdir', 'os.listdir', (['source_dir'], {}), '(source_dir)\n', (42732, 42744), False, 'import os\n'), ((45825, 45863), 'os.path.join', 'os.path.join', (['output_dir', 'csv_filename'], {}), '(output_dir, csv_filename)\n', (45837, 45863), False, 'import os\n'), ((45882, 45906), 'os.path.isfile', 'os.path.isfile', (['csv_path'], {}), '(csv_path)\n', (45896, 
45906), False, 'import os\n'), ((47222, 47247), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (47236, 47247), False, 'import os\n'), ((48582, 48636), 'os.environ.get', 'os.environ.get', (['"""PINECONE_INDEX_NAME"""', '"""mevfyi-cosine"""'], {}), "('PINECONE_INDEX_NAME', 'mevfyi-cosine')\n", (48596, 48636), False, 'import os\n'), ((49266, 49316), 'llama_index.vector_stores.pinecone.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'pinecone_index'}), '(pinecone_index=pinecone_index)\n', (49285, 49316), False, 'from llama_index.vector_stores.pinecone import PineconeVectorStore\n'), ((49406, 49460), 'os.environ.get', 'os.environ.get', (['"""PINECONE_INDEX_NAME"""', '"""mevfyi-cosine"""'], {}), "('PINECONE_INDEX_NAME', 'mevfyi-cosine')\n", (49420, 49460), False, 'import os\n'), ((49741, 49812), 'llama_index.legacy.vector_stores.PineconeVectorStore', 'legacy_vector_stores.PineconeVectorStore', ([], {'pinecone_index': 'pinecone_index'}), '(pinecone_index=pinecone_index)\n', (49781, 49812), True, 'import llama_index.legacy.vector_stores as legacy_vector_stores\n'), ((49979, 50005), 'pandas.DataFrame', 'pd.DataFrame', (['all_metadata'], {}), '(all_metadata)\n', (49991, 50005), True, 'import pandas as pd\n'), ((50021, 50048), 'os.path.join', 'os.path.join', (['root_dir', 'dir'], {}), '(root_dir, dir)\n', (50033, 50048), False, 'import os\n'), ((50056, 50080), 'os.path.exists', 'os.path.exists', (['csv_path'], {}), '(csv_path)\n', (50070, 50080), False, 'import os\n'), ((50297, 50403), 'logging.info', 'logging.info', (['f"""Metadata with # of unique videps [{combined_df.shape[0]}] saved to [{csv_path}]"""'], {}), "(\n f'Metadata with # of unique videps [{combined_df.shape[0]}] saved to [{csv_path}]'\n )\n", (50309, 50403), False, 'import logging\n'), ((1228, 1323), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'rev-parse', '--show-toplevel']"], {'stderr': 'subprocess.STDOUT'}), "(['git', 'rev-parse', '--show-toplevel'], stderr=\n subprocess.STDOUT)\n", (1251, 1323), False, 'import subprocess\n'), ((4146, 4208), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(message)s')\n", (4163, 4208), False, 'import logging\n'), ((4388, 4450), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(message)s')\n", (4405, 4450), False, 'import logging\n'), ((7604, 7638), 'fnmatch.filter', 'fnmatch.filter', (['filenames', '"""*.mp3"""'], {}), "(filenames, '*.mp3')\n", (7618, 7638), False, 'import fnmatch\n'), ((7726, 7761), 'fnmatch.filter', 'fnmatch.filter', (['filenames', '"""*.json"""'], {}), "(filenames, '*.json')\n", (7740, 7761), False, 'import fnmatch\n'), ((7854, 7888), 'fnmatch.filter', 'fnmatch.filter', (['filenames', '"""*.txt"""'], {}), "(filenames, '*.txt')\n", (7868, 7888), False, 'import fnmatch\n'), ((8721, 8745), 'os.path.exists', 'os.path.exists', (['mp3_file'], {}), '(mp3_file)\n', (8735, 8745), False, 'import os\n'), ((25504, 25529), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (25518, 25529), False, 'import os\n'), ((25959, 25977), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (25966, 25977), False, 'import os\n'), ((26256, 26290), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_filename', '"""w"""'], {}), "(zip_filename, 'w')\n", (26271, 26290), False, 'import zipfile\n'), ((27088, 27114), 'os.path.exists', 'os.path.exists', 
(['target_dir'], {}), '(target_dir)\n', (27102, 27114), False, 'import os\n'), ((27124, 27147), 'os.makedirs', 'os.makedirs', (['target_dir'], {}), '(target_dir)\n', (27135, 27147), False, 'import os\n'), ((29639, 29662), 'shutil.rmtree', 'shutil.rmtree', (['dir_path'], {}), '(dir_path)\n', (29652, 29662), False, 'import shutil\n'), ((31560, 31590), 'os.path.join', 'os.path.join', (['csv_directory', 'f'], {}), '(csv_directory, f)\n', (31572, 31590), False, 'import os\n'), ((31752, 31773), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (31763, 31773), True, 'import pandas as pd\n'), ((31838, 31875), 'pandas.concat', 'pd.concat', (['df_list'], {'ignore_index': '(True)'}), '(df_list, ignore_index=True)\n', (31847, 31875), True, 'import pandas as pd\n'), ((32113, 32185), 'logging.info', 'logging.info', (['f"""Merged and deduplicated CSV saved to: {output_csv_path}"""'], {}), "(f'Merged and deduplicated CSV saved to: {output_csv_path}')\n", (32125, 32185), False, 'import logging\n'), ((32204, 32268), 'logging.warning', 'logging.warning', (['"""No CSV files found in the provided directory."""'], {}), "('No CSV files found in the provided directory.')\n", (32219, 32268), False, 'import logging\n'), ((34393, 34426), 'os.path.relpath', 'os.path.relpath', (['root', 'source_dir'], {}), '(root, source_dir)\n', (34408, 34426), False, 'import os\n'), ((34461, 34505), 'os.path.join', 'os.path.join', (['destination_dir', 'relative_path'], {}), '(destination_dir, relative_path)\n', (34473, 34505), False, 'import os\n'), ((34565, 34616), 'os.makedirs', 'os.makedirs', (['current_destination_dir'], {'exist_ok': '(True)'}), '(current_destination_dir, exist_ok=True)\n', (34576, 34616), False, 'import os\n'), ((35779, 35803), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (35793, 35803), False, 'import csv\n'), ((36273, 36327), 'json.dump', 'json.dump', (['url_to_docname_mapping', 'json_file'], {'indent': '(4)'}), '(url_to_docname_mapping, json_file, indent=4)\n', (36282, 36327), False, 'import json\n'), ((39486, 39525), 'os.path.join', 'os.path.join', (['csv_source_dir', 'file_name'], {}), '(csv_source_dir, file_name)\n', (39498, 39525), False, 'import os\n'), ((39848, 39892), 'os.path.join', 'os.path.join', (['csv_destination_dir', 'file_name'], {}), '(csv_destination_dir, file_name)\n', (39860, 39892), False, 'import os\n'), ((41358, 41403), 'shutil.rmtree', 'shutil.rmtree', (['ethglobal_docs_destination_dir'], {}), '(ethglobal_docs_destination_dir)\n', (41371, 41403), False, 'import shutil\n'), ((41796, 41828), 'os.path.exists', 'os.path.exists', (['destination_file'], {}), '(destination_file)\n', (41810, 41828), False, 'import os\n'), ((42284, 42326), 'shutil.copy', 'shutil.copy', (['source_file', 'destination_file'], {}), '(source_file, destination_file)\n', (42295, 42326), False, 'import shutil\n'), ((43884, 43923), 'os.makedirs', 'os.makedirs', (['target_root'], {'exist_ok': '(True)'}), '(target_root, exist_ok=True)\n', (43895, 43923), False, 'import os\n'), ((44002, 44022), 'os.walk', 'os.walk', (['source_root'], {}), '(source_root)\n', (44009, 44022), False, 'import os\n'), ((46321, 46367), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': 'fieldnames'}), '(csvfile, fieldnames=fieldnames)\n', (46335, 46367), False, 'import csv\n'), ((46799, 46816), 'llama_index.legacy.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (46814, 46816), False, 'from llama_index.legacy import OpenAIEmbedding\n'), ((46949, 47002), 
'llama_index.legacy.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'embedding_model_name'}), '(model_name=embedding_model_name)\n', (46969, 47002), False, 'from llama_index.legacy.embeddings import HuggingFaceEmbedding\n'), ((47264, 47286), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (47275, 47286), True, 'import pandas as pd\n'), ((47305, 47364), 'logging.warning', 'logging.warning', (['f"""CSV file not found at path: {file_path}"""'], {}), "(f'CSV file not found at path: {file_path}')\n", (47320, 47364), False, 'import logging\n'), ((47380, 47394), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (47392, 47394), True, 'import pandas as pd\n'), ((50104, 50125), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (50115, 50125), True, 'import pandas as pd\n'), ((2115, 2143), 'os.path.dirname', 'os.path.dirname', (['current_dir'], {}), '(current_dir)\n', (2130, 2143), False, 'import os\n'), ((5386, 5410), 'os.getenv', 'os.getenv', (['"""ENVIRONMENT"""'], {}), "('ENVIRONMENT')\n", (5395, 5410), False, 'import os\n'), ((5524, 5545), 'inspect.getfile', 'inspect.getfile', (['func'], {}), '(func)\n', (5539, 5545), False, 'import inspect\n'), ((5580, 5604), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (5593, 5604), False, 'import os\n'), ((5628, 5655), 'os.path.basename', 'os.path.basename', (['directory'], {}), '(directory)\n', (5644, 5655), False, 'import os\n'), ((5715, 5778), 'logging.info', 'logging.info', (['f"""{dir_name}.{filename}.{func.__name__} STARTED."""'], {}), "(f'{dir_name}.{filename}.{func.__name__} STARTED.')\n", (5727, 5778), False, 'import logging\n'), ((5804, 5815), 'time.time', 'time.time', ([], {}), '()\n', (5813, 5815), False, 'import time\n'), ((5947, 5958), 'time.time', 'time.time', ([], {}), '()\n', (5956, 5958), False, 'import time\n'), ((8264, 8313), 're.sub', 're.sub', (['"""^\\\\d{4}-\\\\d{2}-\\\\d{2}_"""', '""""""', 'jt_basename'], {}), "('^\\\\d{4}-\\\\d{2}-\\\\d{2}_', '', jt_basename)\n", (8270, 8313), False, 'import re\n'), ((8377, 8446), 're.sub', 're.sub', (['"""(_diarized_content(_processed_diarized)?)$"""', '""""""', 'jt_basename'], {}), "('(_diarized_content(_processed_diarized)?)$', '', jt_basename)\n", (8383, 8446), False, 'import re\n'), ((8759, 8778), 'os.remove', 'os.remove', (['mp3_file'], {}), '(mp3_file)\n', (8768, 8778), False, 'import os\n'), ((10429, 10454), 'os.path.dirname', 'os.path.dirname', (['mp3_file'], {}), '(mp3_file)\n', (10444, 10454), False, 'import os\n'), ((10916, 10956), 'os.makedirs', 'os.makedirs', (['new_dir_path'], {'exist_ok': '(True)'}), '(new_dir_path, exist_ok=True)\n', (10927, 10956), False, 'import os\n'), ((11051, 11092), 'os.path.join', 'os.path.join', (['new_dir_path', 'new_file_name'], {}), '(new_dir_path, new_file_name)\n', (11063, 11092), False, 'import os\n'), ((11172, 11208), 'shutil.move', 'shutil.move', (['mp3_file', 'new_file_path'], {}), '(mp3_file, new_file_path)\n', (11183, 11208), False, 'import shutil\n'), ((13247, 13287), 'os.makedirs', 'os.makedirs', (['new_dir_path'], {'exist_ok': '(True)'}), '(new_dir_path, exist_ok=True)\n', (13258, 13287), False, 'import os\n'), ((13389, 13430), 'os.path.join', 'os.path.join', (['new_dir_path', 'new_file_name'], {}), '(new_dir_path, new_file_name)\n', (13401, 13430), False, 'import os\n'), ((13446, 13475), 'os.path.exists', 'os.path.exists', (['new_file_path'], {}), '(new_file_path)\n', (13460, 13475), False, 'import os\n'), ((15750, 15790), 'os.makedirs', 
'os.makedirs', (['new_dir_path'], {'exist_ok': '(True)'}), '(new_dir_path, exist_ok=True)\n', (15761, 15790), False, 'import os\n'), ((15892, 15933), 'os.path.join', 'os.path.join', (['new_dir_path', 'new_file_name'], {}), '(new_dir_path, new_file_name)\n', (15904, 15933), False, 'import os\n'), ((15949, 15978), 'os.path.exists', 'os.path.exists', (['new_file_path'], {}), '(new_file_path)\n', (15963, 15978), False, 'import os\n'), ((17458, 17486), 'os.path.join', 'os.path.join', (['root', 'dir_name'], {}), '(root, dir_name)\n', (17470, 17486), False, 'import os\n'), ((17636, 17664), 'os.path.join', 'os.path.join', (['root', 'dir_name'], {}), '(root, dir_name)\n', (17648, 17664), False, 'import os\n'), ((18942, 18967), 'os.listdir', 'os.listdir', (['dir_to_remove'], {}), '(dir_to_remove)\n', (18952, 18967), False, 'import os\n'), ((18981, 19004), 'os.rmdir', 'os.rmdir', (['dir_to_remove'], {}), '(dir_to_remove)\n', (18989, 19004), False, 'import os\n'), ((19711, 19735), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (19723, 19735), False, 'import os\n'), ((21403, 21427), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (21415, 21427), False, 'import os\n'), ((21456, 21489), 'os.path.join', 'os.path.join', (['root', 'new_file_name'], {}), '(root, new_file_name)\n', (21468, 21489), False, 'import os\n'), ((22152, 22175), 'os.path.join', 'os.path.join', (['root', 'dir'], {}), '(root, dir)\n', (22164, 22175), False, 'import os\n'), ((22203, 22235), 'os.path.join', 'os.path.join', (['root', 'new_dir_name'], {}), '(root, new_dir_name)\n', (22215, 22235), False, 'import os\n'), ((22922, 22945), 'os.path.join', 'os.path.join', (['root', 'dir'], {}), '(root, dir)\n', (22934, 22945), False, 'import os\n'), ((23028, 23051), 'os.listdir', 'os.listdir', (['subdir_path'], {}), '(subdir_path)\n', (23038, 23051), False, 'import os\n'), ((28547, 28572), 'os.path.join', 'os.path.join', (['subdir', 'dir'], {}), '(subdir, dir)\n', (28559, 28572), False, 'import os\n'), ((30696, 30717), 'shutil.rmtree', 'shutil.rmtree', (['subdir'], {}), '(subdir)\n', (30709, 30717), False, 'import shutil\n'), ((31600, 31625), 'os.listdir', 'os.listdir', (['csv_directory'], {}), '(csv_directory)\n', (31610, 31625), False, 'import os\n'), ((41856, 41884), 'os.path.getsize', 'os.path.getsize', (['source_file'], {}), '(source_file)\n', (41871, 41884), False, 'import os\n'), ((41916, 41949), 'os.path.getsize', 'os.path.getsize', (['destination_file'], {}), '(destination_file)\n', (41931, 41949), False, 'import os\n'), ((42856, 42891), 'os.path.join', 'os.path.join', (['source_dir', 'file_name'], {}), '(source_dir, file_name)\n', (42868, 42891), False, 'import os\n'), ((42923, 42963), 'os.path.join', 'os.path.join', (['destination_dir', 'file_name'], {}), '(destination_dir, file_name)\n', (42935, 42963), False, 'import os\n'), ((45758, 45774), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (45762, 45774), False, 'from pathlib import Path\n'), ((48674, 48708), 'os.environ.get', 'os.environ.get', (['"""PINECONE_API_KEY"""'], {}), "('PINECONE_API_KEY')\n", (48688, 48708), False, 'import os\n'), ((48789, 48885), 'logging.warning', 'logging.warning', (['f"""Are you sure you want to delete the old index with name [{index_name}]?"""'], {}), "(\n f'Are you sure you want to delete the old index with name [{index_name}]?')\n", (48804, 48885), False, 'import logging\n'), ((49498, 49532), 'os.environ.get', 'os.environ.get', (['"""PINECONE_API_KEY"""'], {}), 
"('PINECONE_API_KEY')\n", (49512, 49532), False, 'import os\n'), ((1967, 1990), 'os.listdir', 'os.listdir', (['current_dir'], {}), '(current_dir)\n', (1977, 1990), False, 'import os\n'), ((7669, 7700), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (7681, 7700), False, 'import os\n'), ((7797, 7828), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (7809, 7828), False, 'import os\n'), ((7924, 7955), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (7936, 7955), False, 'import os\n'), ((10863, 10888), 'os.path.dirname', 'os.path.dirname', (['mp3_file'], {}), '(mp3_file)\n', (10878, 10888), False, 'import os\n'), ((13054, 13079), 'os.path.dirname', 'os.path.dirname', (['txt_file'], {}), '(txt_file)\n', (13069, 13079), False, 'import os\n'), ((13194, 13219), 'os.path.dirname', 'os.path.dirname', (['txt_file'], {}), '(txt_file)\n', (13209, 13219), False, 'import os\n'), ((13577, 13596), 'os.remove', 'os.remove', (['txt_file'], {}), '(txt_file)\n', (13586, 13596), False, 'import os\n'), ((13700, 13736), 'shutil.move', 'shutil.move', (['txt_file', 'new_file_path'], {}), '(txt_file, new_file_path)\n', (13711, 13736), False, 'import shutil\n'), ((15555, 15581), 'os.path.dirname', 'os.path.dirname', (['json_file'], {}), '(json_file)\n', (15570, 15581), False, 'import os\n'), ((15696, 15722), 'os.path.dirname', 'os.path.dirname', (['json_file'], {}), '(json_file)\n', (15711, 15722), False, 'import os\n'), ((16081, 16101), 'os.remove', 'os.remove', (['json_file'], {}), '(json_file)\n', (16090, 16101), False, 'import os\n'), ((16206, 16243), 'shutil.move', 'shutil.move', (['json_file', 'new_file_path'], {}), '(json_file, new_file_path)\n', (16217, 16243), False, 'import shutil\n'), ((19965, 19998), 'os.path.join', 'os.path.join', (['root', 'new_file_name'], {}), '(root, new_file_name)\n', (19977, 19998), False, 'import os\n'), ((20019, 20048), 'os.path.exists', 'os.path.exists', (['new_file_path'], {}), '(new_file_path)\n', (20033, 20048), False, 'import os\n'), ((20609, 20638), 'os.remove', 'os.remove', (['original_file_path'], {}), '(original_file_path)\n', (20618, 20638), False, 'import os\n'), ((21548, 21577), 'os.path.exists', 'os.path.exists', (['new_file_path'], {}), '(new_file_path)\n', (21562, 21577), False, 'import os\n'), ((22292, 22320), 'os.path.exists', 'os.path.exists', (['new_dir_path'], {}), '(new_dir_path)\n', (22306, 22320), False, 'import os\n'), ((27347, 27371), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (27359, 27371), False, 'import os\n'), ((27388, 27424), 'shutil.copy', 'shutil.copy', (['source_file', 'target_dir'], {}), '(source_file, target_dir)\n', (27399, 27424), False, 'import shutil\n'), ((29108, 29139), 'os.path.join', 'os.path.join', (['subdir', 'json_file'], {}), '(subdir, json_file)\n', (29120, 29139), False, 'import os\n'), ((29172, 29202), 'os.path.join', 'os.path.join', (['subdir', 'txt_file'], {}), '(subdir, txt_file)\n', (29184, 29202), False, 'import os\n'), ((30930, 30951), 'shutil.rmtree', 'shutil.rmtree', (['subdir'], {}), '(subdir)\n', (30943, 30951), False, 'import shutil\n'), ((34801, 34830), 'os.path.join', 'os.path.join', (['root', 'file_name'], {}), '(root, file_name)\n', (34813, 34830), False, 'import os\n'), ((34871, 34919), 'os.path.join', 'os.path.join', (['current_destination_dir', 'file_name'], {}), '(current_destination_dir, file_name)\n', (34883, 34919), False, 'import os\n'), ((40854, 40885), 
'os.path.join', 'os.path.join', (['subdir', 'file_name'], {}), '(subdir, file_name)\n', (40866, 40885), False, 'import os\n'), ((40921, 40980), 'os.path.join', 'os.path.join', (['articles_discourse_destination_dir', 'file_name'], {}), '(articles_discourse_destination_dir, file_name)\n', (40933, 40980), False, 'import os\n'), ((42997, 43039), 'shutil.copy', 'shutil.copy', (['source_file', 'destination_file'], {}), '(source_file, destination_file)\n', (43008, 43039), False, 'import shutil\n'), ((49145, 49192), 'pinecone.ServerlessSpec', 'ServerlessSpec', ([], {'cloud': '"""aws"""', 'region': '"""us-west-2"""'}), "(cloud='aws', region='us-west-2')\n", (49159, 49192), False, 'from pinecone import ServerlessSpec\n'), ((50148, 50176), 'pandas.concat', 'pd.concat', (['[existing_df, df]'], {}), '([existing_df, df])\n', (50157, 50176), True, 'import pandas as pd\n'), ((6905, 6926), 'os.listdir', 'os.listdir', (['index_dir'], {}), '(index_dir)\n', (6915, 6926), False, 'import os\n'), ((8037, 8063), 'os.path.basename', 'os.path.basename', (['mp3_file'], {}), '(mp3_file)\n', (8053, 8063), False, 'import os\n'), ((9948, 9974), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (9960, 9974), False, 'import os\n'), ((12046, 12072), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (12058, 12072), False, 'import os\n'), ((14560, 14586), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (14572, 14586), False, 'import os\n'), ((17853, 17872), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (17867, 17872), False, 'import os\n'), ((17978, 17997), 'os.rename', 'os.rename', (['src', 'dst'], {}), '(src, dst)\n', (17987, 17997), False, 'import os\n'), ((18142, 18157), 'os.listdir', 'os.listdir', (['src'], {}), '(src)\n', (18152, 18157), False, 'import os\n'), ((20209, 20238), 'os.remove', 'os.remove', (['original_file_path'], {}), '(original_file_path)\n', (20218, 20238), False, 'import os\n'), ((20408, 20452), 'os.rename', 'os.rename', (['original_file_path', 'new_file_path'], {}), '(original_file_path, new_file_path)\n', (20417, 20452), False, 'import os\n'), ((21680, 21709), 'os.remove', 'os.remove', (['original_file_path'], {}), '(original_file_path)\n', (21689, 21709), False, 'import os\n'), ((21860, 21904), 'os.rename', 'os.rename', (['original_file_path', 'new_file_path'], {}), '(original_file_path, new_file_path)\n', (21869, 21904), False, 'import os\n'), ((22440, 22472), 'shutil.rmtree', 'shutil.rmtree', (['original_dir_path'], {}), '(original_dir_path)\n', (22453, 22472), False, 'import shutil\n'), ((22655, 22697), 'os.rename', 'os.rename', (['original_dir_path', 'new_dir_path'], {}), '(original_dir_path, new_dir_path)\n', (22664, 22697), False, 'import os\n'), ((29266, 29296), 'os.path.exists', 'os.path.exists', (['json_file_path'], {}), '(json_file_path)\n', (29280, 29296), False, 'import os\n'), ((29301, 29330), 'os.path.exists', 'os.path.exists', (['txt_file_path'], {}), '(txt_file_path)\n', (29315, 29330), False, 'import os\n'), ((34962, 35014), 'shutil.copy', 'shutil.copy', (['source_file_path', 'destination_file_path'], {}), '(source_file_path, destination_file_path)\n', (34973, 35014), False, 'import shutil\n'), ((41022, 41064), 'shutil.copy', 'shutil.copy', (['source_file', 'destination_file'], {}), '(source_file, destination_file)\n', (41033, 41064), False, 'import shutil\n'), ((44194, 44228), 'os.path.relpath', 'os.path.relpath', (['root', 'source_root'], {}), '(root, source_root)\n', (44209, 
44228), False, 'import os\n'), ((45118, 45142), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (45130, 45142), False, 'import os\n'), ((45177, 45216), 'os.path.join', 'os.path.join', (['target_root', 'new_filename'], {}), '(target_root, new_filename)\n', (45189, 45216), False, 'import os\n'), ((45274, 45312), 'shutil.copy2', 'shutil.copy2', (['source_file', 'target_file'], {}), '(source_file, target_file)\n', (45286, 45312), False, 'import shutil\n'), ((8147, 8172), 'os.path.basename', 'os.path.basename', (['jt_file'], {}), '(jt_file)\n', (8163, 8172), False, 'import os\n'), ((18194, 18217), 'os.path.join', 'os.path.join', (['src', 'item'], {}), '(src, item)\n', (18206, 18217), False, 'import os\n'), ((18322, 18346), 'os.path.exists', 'os.path.exists', (['dst_item'], {}), '(dst_item)\n', (18336, 18346), False, 'import os\n'), ((23522, 23557), 'os.path.join', 'os.path.join', (['subdir_path', 'mp3_file'], {}), '(subdir_path, mp3_file)\n', (23534, 23557), False, 'import os\n'), ((23651, 23675), 'os.remove', 'os.remove', (['mp3_file_path'], {}), '(mp3_file_path)\n', (23660, 23675), False, 'import os\n'), ((26095, 26119), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (26107, 26119), False, 'import os\n'), ((44373, 44402), 'os.path.basename', 'os.path.basename', (['source_root'], {}), '(source_root)\n', (44389, 44402), False, 'import os\n'), ((18453, 18472), 'os.remove', 'os.remove', (['src_item'], {}), '(src_item)\n', (18462, 18472), False, 'import os\n'), ((18605, 18636), 'shutil.move', 'shutil.move', (['src_item', 'dst_item'], {}), '(src_item, dst_item)\n', (18616, 18636), False, 'import shutil\n'), ((26137, 26161), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (26149, 26161), False, 'import os\n'), ((42163, 42192), 'os.path.basename', 'os.path.basename', (['source_file'], {}), '(source_file)\n', (42179, 42192), False, 'import os\n')] |
import logging
from dataclasses import dataclass
from typing import Optional
import llama_index
from llama_index.bridge.pydantic import BaseModel
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.utils import EmbedType, resolve_embed_model
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.llm_predictor import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.llms.base import LLM
from llama_index.llms.utils import LLMType, resolve_llm
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser
from llama_index.node_parser.sentence_window import SentenceWindowNodeParser
from llama_index.node_parser.simple import SimpleNodeParser
from llama_index.prompts.base import BasePromptTemplate
from llama_index.text_splitter.types import TextSplitter
from llama_index.types import PydanticProgramMode
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SimpleNodeParser.from_defaults(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
class ServiceContextData(BaseModel):
llm: dict
llm_predictor: dict
prompt_helper: dict
embed_model: dict
node_parser: dict
text_splitter: Optional[dict]
metadata_extractor: Optional[dict]
extractors: Optional[list]
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
node_parser: NodeParser
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[EmbedType] = "default",
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# pydantic program mode (used if output_cls is specified)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
system_prompt (Optional[str]): System-wide prompt to be prepended
to all input prompts, used to guide system "decision making"
query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
passed-in input queries.
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
llama_logger=llama_logger,
callback_manager=callback_manager,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
)
callback_manager = callback_manager or CallbackManager([])
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = llm_predictor or LLMPredictor(
llm=llm, pydantic_program_mode=pydantic_program_mode
)
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
node_parser = node_parser or _get_default_node_parser(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[EmbedType] = "default",
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
if chunk_size_limit is not None and chunk_size is None:
            logger.warning(
                "chunk_size_limit is deprecated, please specify chunk_size instead"
            )
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# default to using the embed model passed from the service context
if embed_model == "default":
embed_model = service_context.embed_model
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or service_context.prompt_helper
if context_window is not None or num_output is not None:
prompt_helper = _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
node_parser = node_parser or service_context.node_parser
if chunk_size is not None or chunk_overlap is not None:
node_parser = _get_default_node_parser(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
if not isinstance(self.llm_predictor, LLMPredictor):
raise ValueError("llm_predictor must be an instance of LLMPredictor")
return self.llm_predictor.llm
def to_dict(self) -> dict:
"""Convert service context to dict."""
llm_dict = self.llm_predictor.llm.to_dict()
llm_predictor_dict = self.llm_predictor.to_dict()
embed_model_dict = self.embed_model.to_dict()
prompt_helper_dict = self.prompt_helper.to_dict()
node_parser_dict = self.node_parser.to_dict()
metadata_extractor_dict = None
extractor_dicts = None
text_splitter_dict = None
if isinstance(self.node_parser, SimpleNodeParser) and isinstance(
self.node_parser.text_splitter, TextSplitter
):
text_splitter_dict = self.node_parser.text_splitter.to_dict()
if isinstance(self.node_parser, (SimpleNodeParser, SentenceWindowNodeParser)):
if self.node_parser.metadata_extractor:
metadata_extractor_dict = self.node_parser.metadata_extractor.to_dict()
extractor_dicts = []
for extractor in self.node_parser.metadata_extractor.extractors:
extractor_dicts.append(extractor.to_dict())
return ServiceContextData(
llm=llm_dict,
llm_predictor=llm_predictor_dict,
prompt_helper=prompt_helper_dict,
embed_model=embed_model_dict,
node_parser=node_parser_dict,
text_splitter=text_splitter_dict,
metadata_extractor=metadata_extractor_dict,
extractors=extractor_dicts,
).dict()
@classmethod
def from_dict(cls, data: dict) -> "ServiceContext":
from llama_index.embeddings.loading import load_embed_model
from llama_index.llm_predictor.loading import load_predictor
from llama_index.llms.loading import load_llm
from llama_index.node_parser.extractors.loading import load_extractor
from llama_index.node_parser.loading import load_parser
from llama_index.text_splitter.loading import load_text_splitter
service_context_data = ServiceContextData.parse_obj(data)
llm = load_llm(service_context_data.llm)
llm_predictor = load_predictor(service_context_data.llm_predictor, llm=llm)
embed_model = load_embed_model(service_context_data.embed_model)
prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)
extractors = None
if service_context_data.extractors:
extractors = []
for extractor_dict in service_context_data.extractors:
extractors.append(load_extractor(extractor_dict, llm=llm))
metadata_extractor = None
if service_context_data.metadata_extractor:
metadata_extractor = load_extractor(
service_context_data.metadata_extractor,
extractors=extractors,
)
text_splitter = None
if service_context_data.text_splitter:
text_splitter = load_text_splitter(service_context_data.text_splitter)
node_parser = load_parser(
service_context_data.node_parser,
text_splitter=text_splitter,
metadata_extractor=metadata_extractor,
)
return cls.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
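# Hedged usage sketch (not part of the original module): build a ServiceContext with custom
# chunking and register it globally so that subsequently created indices pick it up. The
# chunk_size/chunk_overlap values are arbitrary choices for the example.
def _example_configure_global_service_context() -> ServiceContext:
    service_context = ServiceContext.from_defaults(chunk_size=512, chunk_overlap=64)
    set_global_service_context(service_context)
    return service_context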
| [
"llama_index.llm_predictor.loading.load_predictor",
"llama_index.llms.loading.load_llm",
"llama_index.embeddings.loading.load_embed_model",
"llama_index.logger.LlamaLogger",
"llama_index.node_parser.extractors.loading.load_extractor",
"llama_index.llms.utils.resolve_llm",
"llama_index.node_parser.loading.load_parser",
"llama_index.callbacks.base.CallbackManager",
"llama_index.node_parser.simple.SimpleNodeParser.from_defaults",
"llama_index.indices.prompt_helper.PromptHelper.from_dict",
"llama_index.text_splitter.loading.load_text_splitter",
"llama_index.llm_predictor.LLMPredictor",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.embeddings.utils.resolve_embed_model"
] | [((1015, 1042), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1032, 1042), False, 'import logging\n'), ((1273, 1395), 'llama_index.node_parser.simple.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'callback_manager': 'callback_manager'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap, callback_manager=callback_manager)\n', (1303, 1395), False, 'from llama_index.node_parser.simple import SimpleNodeParser\n'), ((1798, 1855), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1828, 1855), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((6560, 6592), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (6579, 6592), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((9616, 9648), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (9635, 9648), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((13008, 13042), 'llama_index.llms.loading.load_llm', 'load_llm', (['service_context_data.llm'], {}), '(service_context_data.llm)\n', (13016, 13042), False, 'from llama_index.llms.loading import load_llm\n'), ((13067, 13126), 'llama_index.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {'llm': 'llm'}), '(service_context_data.llm_predictor, llm=llm)\n', (13081, 13126), False, 'from llama_index.llm_predictor.loading import load_predictor\n'), ((13150, 13200), 'llama_index.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (13166, 13200), False, 'from llama_index.embeddings.loading import load_embed_model\n'), ((13226, 13284), 'llama_index.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (13248, 13284), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((13955, 14072), 'llama_index.node_parser.loading.load_parser', 'load_parser', (['service_context_data.node_parser'], {'text_splitter': 'text_splitter', 'metadata_extractor': 'metadata_extractor'}), '(service_context_data.node_parser, text_splitter=text_splitter,\n metadata_extractor=metadata_extractor)\n', (13966, 14072), False, 'from llama_index.node_parser.loading import load_parser\n'), ((5826, 5845), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (5841, 5845), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((6013, 6029), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6024, 6029), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((6071, 6137), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (6083, 6137), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((7093, 7106), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (7104, 7106), False, 'from llama_index.logger import LlamaLogger\n'), ((8911, 8927), 
'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (8922, 8927), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((8956, 8977), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (8968, 8977), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((13646, 13724), 'llama_index.node_parser.extractors.loading.load_extractor', 'load_extractor', (['service_context_data.metadata_extractor'], {'extractors': 'extractors'}), '(service_context_data.metadata_extractor, extractors=extractors)\n', (13660, 13724), False, 'from llama_index.node_parser.extractors.loading import load_extractor\n'), ((13877, 13931), 'llama_index.text_splitter.loading.load_text_splitter', 'load_text_splitter', (['service_context_data.text_splitter'], {}), '(service_context_data.text_splitter)\n', (13895, 13931), False, 'from llama_index.text_splitter.loading import load_text_splitter\n'), ((13485, 13524), 'llama_index.node_parser.extractors.loading.load_extractor', 'load_extractor', (['extractor_dict'], {'llm': 'llm'}), '(extractor_dict, llm=llm)\n', (13499, 13524), False, 'from llama_index.node_parser.extractors.loading import load_extractor\n')] |
import logging
from typing import (
Any,
Callable,
Generator,
Optional,
Sequence,
Type,
cast,
AsyncGenerator,
)
from llama_index.core.bridge.pydantic import BaseModel, Field, ValidationError
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.indices.prompt_helper import PromptHelper
from llama_index.core.indices.utils import truncate_text
from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate
from llama_index.core.prompts.default_prompt_selectors import (
DEFAULT_REFINE_PROMPT_SEL,
DEFAULT_TEXT_QA_PROMPT_SEL,
)
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.response.utils import get_response_text
from llama_index.core.response_synthesizers.base import BaseSynthesizer
from llama_index.core.service_context import ServiceContext
from llama_index.core.service_context_elements.llm_predictor import (
LLMPredictorType,
)
from llama_index.core.types import RESPONSE_TEXT_TYPE, BasePydanticProgram
from llama_index.core.instrumentation.events.synthesis import (
GetResponseEndEvent,
GetResponseStartEvent,
)
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
logger = logging.getLogger(__name__)
class StructuredRefineResponse(BaseModel):
"""
Used to answer a given query based on the provided context.
Also indicates if the query was satisfied with the provided answer.
"""
answer: str = Field(
description="The answer for the given query, based on the context and not "
"prior knowledge."
)
query_satisfied: bool = Field(
description="True if there was enough context given to provide an answer "
"that satisfies the query."
)
class DefaultRefineProgram(BasePydanticProgram):
"""
Runs the query on the LLM as normal and always returns the answer with
query_satisfied=True. In effect, doesn't do any answer filtering.
"""
def __init__(
self, prompt: BasePromptTemplate, llm: LLMPredictorType, output_cls: BaseModel
):
self._prompt = prompt
self._llm = llm
self._output_cls = output_cls
@property
def output_cls(self) -> Type[BaseModel]:
return StructuredRefineResponse
def __call__(self, *args: Any, **kwds: Any) -> StructuredRefineResponse:
if self._output_cls is not None:
answer = self._llm.structured_predict(
self._output_cls,
self._prompt,
**kwds,
)
answer = answer.json()
else:
answer = self._llm.predict(
self._prompt,
**kwds,
)
return StructuredRefineResponse(answer=answer, query_satisfied=True)
async def acall(self, *args: Any, **kwds: Any) -> StructuredRefineResponse:
if self._output_cls is not None:
answer = await self._llm.astructured_predict(
self._output_cls,
self._prompt,
**kwds,
)
answer = answer.json()
else:
answer = await self._llm.apredict(
self._prompt,
**kwds,
)
return StructuredRefineResponse(answer=answer, query_satisfied=True)
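# Illustrative sketch (hedged, not part of this module): both program variants satisfy
# the same contract that Refine relies on below. Assuming `text_qa_template` is an
# already partial-formatted prompt and `llm` is any LLM instance in scope:
#
#     program = DefaultRefineProgram(prompt=text_qa_template, llm=llm, output_cls=None)
#     result = program(context_str="<some chunk of text>")
#     # -> StructuredRefineResponse(answer="...", query_satisfied=True)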
class Refine(BaseSynthesizer):
"""Refine a response to a query across text chunks."""
def __init__(
self,
llm: Optional[LLMPredictorType] = None,
callback_manager: Optional[CallbackManager] = None,
prompt_helper: Optional[PromptHelper] = None,
text_qa_template: Optional[BasePromptTemplate] = None,
refine_template: Optional[BasePromptTemplate] = None,
output_cls: Optional[BaseModel] = None,
streaming: bool = False,
verbose: bool = False,
structured_answer_filtering: bool = False,
program_factory: Optional[
Callable[[BasePromptTemplate], BasePydanticProgram]
] = None,
# deprecated
service_context: Optional[ServiceContext] = None,
) -> None:
if service_context is not None:
prompt_helper = service_context.prompt_helper
super().__init__(
llm=llm,
callback_manager=callback_manager,
prompt_helper=prompt_helper,
service_context=service_context,
streaming=streaming,
)
self._text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT_SEL
self._refine_template = refine_template or DEFAULT_REFINE_PROMPT_SEL
self._verbose = verbose
self._structured_answer_filtering = structured_answer_filtering
self._output_cls = output_cls
if self._streaming and self._structured_answer_filtering:
raise ValueError(
"Streaming not supported with structured answer filtering."
)
if not self._structured_answer_filtering and program_factory is not None:
raise ValueError(
"Program factory not supported without structured answer filtering."
)
self._program_factory = program_factory or self._default_program_factory
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {
"text_qa_template": self._text_qa_template,
"refine_template": self._refine_template,
}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "text_qa_template" in prompts:
self._text_qa_template = prompts["text_qa_template"]
if "refine_template" in prompts:
self._refine_template = prompts["refine_template"]
@dispatcher.span
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
prev_response: Optional[RESPONSE_TEXT_TYPE] = None,
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Give response over chunks."""
dispatcher.event(GetResponseStartEvent())
response: Optional[RESPONSE_TEXT_TYPE] = None
for text_chunk in text_chunks:
if prev_response is None:
# if this is the first chunk, and text chunk already
# is an answer, then return it
response = self._give_response_single(
query_str, text_chunk, **response_kwargs
)
else:
# refine response if possible
response = self._refine_response_single(
prev_response, query_str, text_chunk, **response_kwargs
)
prev_response = response
if isinstance(response, str):
if self._output_cls is not None:
response = self._output_cls.parse_raw(response)
else:
response = response or "Empty Response"
else:
response = cast(Generator, response)
dispatcher.event(GetResponseEndEvent())
return response
def _default_program_factory(self, prompt: PromptTemplate) -> BasePydanticProgram:
if self._structured_answer_filtering:
from llama_index.core.program.utils import get_program_for_llm
return get_program_for_llm(
StructuredRefineResponse,
prompt,
self._llm,
verbose=self._verbose,
)
else:
return DefaultRefineProgram(
prompt=prompt,
llm=self._llm,
output_cls=self._output_cls,
)
def _give_response_single(
self,
query_str: str,
text_chunk: str,
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Give response given a query and a corresponding text chunk."""
text_qa_template = self._text_qa_template.partial_format(query_str=query_str)
text_chunks = self._prompt_helper.repack(text_qa_template, [text_chunk])
response: Optional[RESPONSE_TEXT_TYPE] = None
program = self._program_factory(text_qa_template)
# TODO: consolidate with loop in get_response_default
for cur_text_chunk in text_chunks:
query_satisfied = False
if response is None and not self._streaming:
try:
structured_response = cast(
StructuredRefineResponse,
program(
context_str=cur_text_chunk,
**response_kwargs,
),
)
query_satisfied = structured_response.query_satisfied
if query_satisfied:
response = structured_response.answer
except ValidationError as e:
logger.warning(
f"Validation error on structured response: {e}", exc_info=True
)
elif response is None and self._streaming:
response = self._llm.stream(
text_qa_template,
context_str=cur_text_chunk,
**response_kwargs,
)
query_satisfied = True
else:
response = self._refine_response_single(
cast(RESPONSE_TEXT_TYPE, response),
query_str,
cur_text_chunk,
**response_kwargs,
)
if response is None:
response = "Empty Response"
if isinstance(response, str):
response = response or "Empty Response"
else:
response = cast(Generator, response)
return response
def _refine_response_single(
self,
response: RESPONSE_TEXT_TYPE,
query_str: str,
text_chunk: str,
**response_kwargs: Any,
) -> Optional[RESPONSE_TEXT_TYPE]:
"""Refine response."""
# TODO: consolidate with logic in response/schema.py
if isinstance(response, Generator):
response = get_response_text(response)
fmt_text_chunk = truncate_text(text_chunk, 50)
logger.debug(f"> Refine context: {fmt_text_chunk}")
if self._verbose:
print(f"> Refine context: {fmt_text_chunk}")
# NOTE: partial format refine template with query_str and existing_answer here
refine_template = self._refine_template.partial_format(
query_str=query_str, existing_answer=response
)
# compute available chunk size to see if there is any available space
# determine if the refine template is too big (which can happen if
# prompt template + query + existing answer is too large)
avail_chunk_size = self._prompt_helper._get_available_chunk_size(
refine_template
)
if avail_chunk_size < 0:
# if the available chunk size is negative, then the refine template
# is too big and we just return the original response
return response
# obtain text chunks to add to the refine template
text_chunks = self._prompt_helper.repack(
refine_template, text_chunks=[text_chunk]
)
program = self._program_factory(refine_template)
for cur_text_chunk in text_chunks:
query_satisfied = False
if not self._streaming:
try:
structured_response = cast(
StructuredRefineResponse,
program(
context_msg=cur_text_chunk,
**response_kwargs,
),
)
query_satisfied = structured_response.query_satisfied
if query_satisfied:
response = structured_response.answer
except ValidationError as e:
logger.warning(
f"Validation error on structured response: {e}", exc_info=True
)
else:
# TODO: structured response not supported for streaming
if isinstance(response, Generator):
response = "".join(response)
refine_template = self._refine_template.partial_format(
query_str=query_str, existing_answer=response
)
response = self._llm.stream(
refine_template,
context_msg=cur_text_chunk,
**response_kwargs,
)
return response
@dispatcher.span
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
prev_response: Optional[RESPONSE_TEXT_TYPE] = None,
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
dispatcher.event(GetResponseStartEvent())
response: Optional[RESPONSE_TEXT_TYPE] = None
for text_chunk in text_chunks:
if prev_response is None:
# if this is the first chunk, and text chunk already
# is an answer, then return it
response = await self._agive_response_single(
query_str, text_chunk, **response_kwargs
)
else:
response = await self._arefine_response_single(
prev_response, query_str, text_chunk, **response_kwargs
)
prev_response = response
if response is None:
response = "Empty Response"
if isinstance(response, str):
if self._output_cls is not None:
response = self._output_cls.parse_raw(response)
else:
response = response or "Empty Response"
else:
response = cast(AsyncGenerator, response)
return response
async def _arefine_response_single(
self,
response: RESPONSE_TEXT_TYPE,
query_str: str,
text_chunk: str,
**response_kwargs: Any,
) -> Optional[RESPONSE_TEXT_TYPE]:
"""Refine response."""
# TODO: consolidate with logic in response/schema.py
if isinstance(response, Generator):
response = get_response_text(response)
fmt_text_chunk = truncate_text(text_chunk, 50)
logger.debug(f"> Refine context: {fmt_text_chunk}")
# NOTE: partial format refine template with query_str and existing_answer here
refine_template = self._refine_template.partial_format(
query_str=query_str, existing_answer=response
)
# compute available chunk size to see if there is any available space
# determine if the refine template is too big (which can happen if
# prompt template + query + existing answer is too large)
avail_chunk_size = self._prompt_helper._get_available_chunk_size(
refine_template
)
if avail_chunk_size < 0:
# if the available chunk size is negative, then the refine template
# is too big and we just return the original response
return response
# obtain text chunks to add to the refine template
text_chunks = self._prompt_helper.repack(
refine_template, text_chunks=[text_chunk]
)
program = self._program_factory(refine_template)
for cur_text_chunk in text_chunks:
query_satisfied = False
if not self._streaming:
try:
structured_response = await program.acall(
context_msg=cur_text_chunk,
**response_kwargs,
)
structured_response = cast(
StructuredRefineResponse, structured_response
)
query_satisfied = structured_response.query_satisfied
if query_satisfied:
response = structured_response.answer
except ValidationError as e:
logger.warning(
f"Validation error on structured response: {e}", exc_info=True
)
else:
if isinstance(response, Generator):
response = "".join(response)
if isinstance(response, AsyncGenerator):
_r = ""
async for text in response:
_r += text
response = _r
refine_template = self._refine_template.partial_format(
query_str=query_str, existing_answer=response
)
response = await self._llm.astream(
refine_template,
context_msg=cur_text_chunk,
**response_kwargs,
)
if query_satisfied:
refine_template = self._refine_template.partial_format(
query_str=query_str, existing_answer=response
)
return response
async def _agive_response_single(
self,
query_str: str,
text_chunk: str,
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Give response given a query and a corresponding text chunk."""
text_qa_template = self._text_qa_template.partial_format(query_str=query_str)
text_chunks = self._prompt_helper.repack(text_qa_template, [text_chunk])
response: Optional[RESPONSE_TEXT_TYPE] = None
program = self._program_factory(text_qa_template)
# TODO: consolidate with loop in get_response_default
for cur_text_chunk in text_chunks:
if response is None and not self._streaming:
try:
structured_response = await program.acall(
context_str=cur_text_chunk,
**response_kwargs,
)
structured_response = cast(
StructuredRefineResponse, structured_response
)
query_satisfied = structured_response.query_satisfied
if query_satisfied:
response = structured_response.answer
except ValidationError as e:
logger.warning(
f"Validation error on structured response: {e}", exc_info=True
)
elif response is None and self._streaming:
response = await self._llm.astream(
text_qa_template,
context_str=cur_text_chunk,
**response_kwargs,
)
query_satisfied = True
else:
response = await self._arefine_response_single(
cast(RESPONSE_TEXT_TYPE, response),
query_str,
cur_text_chunk,
**response_kwargs,
)
if response is None:
response = "Empty Response"
if isinstance(response, str):
response = response or "Empty Response"
else:
response = cast(AsyncGenerator, response)
return response
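# Usage sketch (hedged, not from the original module): a Refine synthesizer is normally
# obtained via a response-synthesizer factory, but it can also be driven directly. The
# names below (`llm`, the chunk strings, the question) are illustrative assumptions.
#
#     synth = Refine(llm=llm, structured_answer_filtering=True, verbose=True)
#     answer = synth.get_response(
#         "What does the document say about X?",
#         text_chunks=["first retrieved chunk", "second retrieved chunk"],
#     )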
| [
"llama_index.core.response.utils.get_response_text",
"llama_index.core.instrumentation.events.synthesis.GetResponseStartEvent",
"llama_index.core.indices.utils.truncate_text",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.instrumentation.get_dispatcher",
"llama_index.core.program.utils.get_program_for_llm",
"llama_index.core.instrumentation.events.synthesis.GetResponseEndEvent"
] | [((1218, 1253), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (1243, 1253), True, 'import llama_index.core.instrumentation as instrument\n'), ((1264, 1291), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1281, 1291), False, 'import logging\n'), ((1509, 1617), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""The answer for the given query, based on the context and not prior knowledge."""'}), "(description=\n 'The answer for the given query, based on the context and not prior knowledge.'\n )\n", (1514, 1617), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, ValidationError\n'), ((1661, 1777), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""True if there was enough context given to provide an answer that satisfies the query."""'}), "(description=\n 'True if there was enough context given to provide an answer that satisfies the query.'\n )\n", (1666, 1777), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, ValidationError\n'), ((10213, 10242), 'llama_index.core.indices.utils.truncate_text', 'truncate_text', (['text_chunk', '(50)'], {}), '(text_chunk, 50)\n', (10226, 10242), False, 'from llama_index.core.indices.utils import truncate_text\n'), ((14429, 14458), 'llama_index.core.indices.utils.truncate_text', 'truncate_text', (['text_chunk', '(50)'], {}), '(text_chunk, 50)\n', (14442, 14458), False, 'from llama_index.core.indices.utils import truncate_text\n'), ((6047, 6070), 'llama_index.core.instrumentation.events.synthesis.GetResponseStartEvent', 'GetResponseStartEvent', ([], {}), '()\n', (6068, 6070), False, 'from llama_index.core.instrumentation.events.synthesis import GetResponseEndEvent, GetResponseStartEvent\n'), ((6963, 6988), 'typing.cast', 'cast', (['Generator', 'response'], {}), '(Generator, response)\n', (6967, 6988), False, 'from typing import Any, Callable, Generator, Optional, Sequence, Type, cast, AsyncGenerator\n'), ((7014, 7035), 'llama_index.core.instrumentation.events.synthesis.GetResponseEndEvent', 'GetResponseEndEvent', ([], {}), '()\n', (7033, 7035), False, 'from llama_index.core.instrumentation.events.synthesis import GetResponseEndEvent, GetResponseStartEvent\n'), ((7290, 7382), 'llama_index.core.program.utils.get_program_for_llm', 'get_program_for_llm', (['StructuredRefineResponse', 'prompt', 'self._llm'], {'verbose': 'self._verbose'}), '(StructuredRefineResponse, prompt, self._llm, verbose=\n self._verbose)\n', (7309, 7382), False, 'from llama_index.core.program.utils import get_program_for_llm\n'), ((9744, 9769), 'typing.cast', 'cast', (['Generator', 'response'], {}), '(Generator, response)\n', (9748, 9769), False, 'from typing import Any, Callable, Generator, Optional, Sequence, Type, cast, AsyncGenerator\n'), ((10159, 10186), 'llama_index.core.response.utils.get_response_text', 'get_response_text', (['response'], {}), '(response)\n', (10176, 10186), False, 'from llama_index.core.response.utils import get_response_text\n'), ((12995, 13018), 'llama_index.core.instrumentation.events.synthesis.GetResponseStartEvent', 'GetResponseStartEvent', ([], {}), '()\n', (13016, 13018), False, 'from llama_index.core.instrumentation.events.synthesis import GetResponseEndEvent, GetResponseStartEvent\n'), ((13948, 13978), 'typing.cast', 'cast', (['AsyncGenerator', 'response'], {}), '(AsyncGenerator, response)\n', (13952, 13978), False, 'from typing import Any, Callable, Generator, 
Optional, Sequence, Type, cast, AsyncGenerator\n'), ((14375, 14402), 'llama_index.core.response.utils.get_response_text', 'get_response_text', (['response'], {}), '(response)\n', (14392, 14402), False, 'from llama_index.core.response.utils import get_response_text\n'), ((19354, 19384), 'typing.cast', 'cast', (['AsyncGenerator', 'response'], {}), '(AsyncGenerator, response)\n', (19358, 19384), False, 'from typing import Any, Callable, Generator, Optional, Sequence, Type, cast, AsyncGenerator\n'), ((15869, 15920), 'typing.cast', 'cast', (['StructuredRefineResponse', 'structured_response'], {}), '(StructuredRefineResponse, structured_response)\n', (15873, 15920), False, 'from typing import Any, Callable, Generator, Optional, Sequence, Type, cast, AsyncGenerator\n'), ((18143, 18194), 'typing.cast', 'cast', (['StructuredRefineResponse', 'structured_response'], {}), '(StructuredRefineResponse, structured_response)\n', (18147, 18194), False, 'from typing import Any, Callable, Generator, Optional, Sequence, Type, cast, AsyncGenerator\n'), ((9388, 9422), 'typing.cast', 'cast', (['RESPONSE_TEXT_TYPE', 'response'], {}), '(RESPONSE_TEXT_TYPE, response)\n', (9392, 9422), False, 'from typing import Any, Callable, Generator, Optional, Sequence, Type, cast, AsyncGenerator\n'), ((18998, 19032), 'typing.cast', 'cast', (['RESPONSE_TEXT_TYPE', 'response'], {}), '(RESPONSE_TEXT_TYPE, response)\n', (19002, 19032), False, 'from typing import Any, Callable, Generator, Optional, Sequence, Type, cast, AsyncGenerator\n')] |
"""Base retriever."""
from abc import abstractmethod
from typing import Any, Dict, List, Optional
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.query_pipeline.query import (
ChainableMixin,
InputKeys,
OutputKeys,
QueryComponent,
validate_and_convert_stringable,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.prompts.mixin import (
PromptDictType,
PromptMixin,
PromptMixinType,
)
from llama_index.core.schema import (
BaseNode,
IndexNode,
NodeWithScore,
QueryBundle,
QueryType,
TextNode,
)
from llama_index.core.service_context import ServiceContext
from llama_index.core.settings import Settings
from llama_index.core.utils import print_text
from llama_index.core.instrumentation.events.retrieval import (
RetrievalEndEvent,
RetrievalStartEvent,
)
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
class BaseRetriever(ChainableMixin, PromptMixin):
"""Base retriever."""
def __init__(
self,
callback_manager: Optional[CallbackManager] = None,
object_map: Optional[Dict] = None,
objects: Optional[List[IndexNode]] = None,
verbose: bool = False,
) -> None:
self.callback_manager = callback_manager or CallbackManager()
if objects is not None:
object_map = {obj.index_id: obj.obj for obj in objects}
self.object_map = object_map or {}
self._verbose = verbose
def _check_callback_manager(self) -> None:
"""Check callback manager."""
if not hasattr(self, "callback_manager"):
self.callback_manager = Settings.callback_manager
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt modules."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
def _retrieve_from_object(
self,
obj: Any,
query_bundle: QueryBundle,
score: float,
) -> List[NodeWithScore]:
"""Retrieve nodes from object."""
if self._verbose:
print_text(
f"Retrieving from object {obj.__class__.__name__} with query {query_bundle.query_str}\n",
color="llama_pink",
)
if isinstance(obj, NodeWithScore):
return [obj]
elif isinstance(obj, BaseNode):
return [NodeWithScore(node=obj, score=score)]
elif isinstance(obj, BaseQueryEngine):
response = obj.query(query_bundle)
return [
NodeWithScore(
node=TextNode(text=str(response), metadata=response.metadata or {}),
score=score,
)
]
elif isinstance(obj, BaseRetriever):
return obj.retrieve(query_bundle)
elif isinstance(obj, QueryComponent):
component_keys = obj.input_keys.required_keys
if len(component_keys) > 1:
raise ValueError(
f"QueryComponent {obj} has more than one input key: {component_keys}"
)
elif len(component_keys) == 0:
component_response = obj.run_component()
else:
kwargs = {next(iter(component_keys)): query_bundle.query_str}
component_response = obj.run_component(**kwargs)
result_output = str(next(iter(component_response.values())))
return [NodeWithScore(node=TextNode(text=result_output), score=score)]
else:
raise ValueError(f"Object {obj} is not retrievable.")
async def _aretrieve_from_object(
self,
obj: Any,
query_bundle: QueryBundle,
score: float,
) -> List[NodeWithScore]:
"""Retrieve nodes from object."""
if isinstance(obj, NodeWithScore):
return [obj]
elif isinstance(obj, BaseNode):
return [NodeWithScore(node=obj, score=score)]
elif isinstance(obj, BaseQueryEngine):
response = await obj.aquery(query_bundle)
return [NodeWithScore(node=TextNode(text=str(response)), score=score)]
elif isinstance(obj, BaseRetriever):
return await obj.aretrieve(query_bundle)
elif isinstance(obj, QueryComponent):
component_keys = obj.input_keys.required_keys
if len(component_keys) > 1:
raise ValueError(
f"QueryComponent {obj} has more than one input key: {component_keys}"
)
elif len(component_keys) == 0:
component_response = await obj.arun_component()
else:
kwargs = {next(iter(component_keys)): query_bundle.query_str}
component_response = await obj.arun_component(**kwargs)
result_output = str(next(iter(component_response.values())))
return [NodeWithScore(node=TextNode(text=result_output), score=score)]
else:
raise ValueError(f"Object {obj} is not retrievable.")
def _handle_recursive_retrieval(
self, query_bundle: QueryBundle, nodes: List[NodeWithScore]
) -> List[NodeWithScore]:
retrieved_nodes: List[NodeWithScore] = []
for n in nodes:
node = n.node
score = n.score or 1.0
if isinstance(node, IndexNode):
obj = node.obj or self.object_map.get(node.index_id, None)
if obj is not None:
if self._verbose:
print_text(
f"Retrieval entering {node.index_id}: {obj.__class__.__name__}\n",
color="llama_turquoise",
)
retrieved_nodes.extend(
self._retrieve_from_object(
obj, query_bundle=query_bundle, score=score
)
)
else:
retrieved_nodes.append(n)
else:
retrieved_nodes.append(n)
seen = set()
return [
n
for n in retrieved_nodes
if not (n.node.hash in seen or seen.add(n.node.hash)) # type: ignore[func-returns-value]
]
async def _ahandle_recursive_retrieval(
self, query_bundle: QueryBundle, nodes: List[NodeWithScore]
) -> List[NodeWithScore]:
retrieved_nodes: List[NodeWithScore] = []
for n in nodes:
node = n.node
score = n.score or 1.0
if isinstance(node, IndexNode):
obj = node.obj or self.object_map.get(node.index_id, None)
if obj is not None:
if self._verbose:
print_text(
f"Retrieval entering {node.index_id}: {obj.__class__.__name__}\n",
color="llama_turquoise",
)
# TODO: Add concurrent execution via `run_jobs()` ?
retrieved_nodes.extend(
await self._aretrieve_from_object(
obj, query_bundle=query_bundle, score=score
)
)
else:
retrieved_nodes.append(n)
else:
retrieved_nodes.append(n)
# remove any duplicates based on hash
seen = set()
return [
n
for n in retrieved_nodes
if not (n.node.hash in seen or seen.add(n.node.hash)) # type: ignore[func-returns-value]
]
@dispatcher.span
def retrieve(self, str_or_query_bundle: QueryType) -> List[NodeWithScore]:
"""Retrieve nodes given query.
Args:
str_or_query_bundle (QueryType): Either a query string or
a QueryBundle object.
"""
self._check_callback_manager()
dispatcher.event(RetrievalStartEvent())
if isinstance(str_or_query_bundle, str):
query_bundle = QueryBundle(str_or_query_bundle)
else:
query_bundle = str_or_query_bundle
with self.callback_manager.as_trace("query"):
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = self._retrieve(query_bundle)
nodes = self._handle_recursive_retrieval(query_bundle, nodes)
retrieve_event.on_end(
payload={EventPayload.NODES: nodes},
)
dispatcher.event(RetrievalEndEvent())
return nodes
@dispatcher.span
async def aretrieve(self, str_or_query_bundle: QueryType) -> List[NodeWithScore]:
self._check_callback_manager()
dispatcher.event(RetrievalStartEvent())
if isinstance(str_or_query_bundle, str):
query_bundle = QueryBundle(str_or_query_bundle)
else:
query_bundle = str_or_query_bundle
with self.callback_manager.as_trace("query"):
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = await self._aretrieve(query_bundle=query_bundle)
nodes = await self._ahandle_recursive_retrieval(
query_bundle=query_bundle, nodes=nodes
)
retrieve_event.on_end(
payload={EventPayload.NODES: nodes},
)
dispatcher.event(RetrievalEndEvent())
return nodes
@abstractmethod
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve nodes given query.
Implemented by the user.
"""
# TODO: make this abstract
# @abstractmethod
async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Asynchronously retrieve nodes given query.
Implemented by the user.
"""
return self._retrieve(query_bundle)
def get_service_context(self) -> Optional[ServiceContext]:
"""Attempts to resolve a service context.
Short-circuits at self.service_context, self._service_context,
or self._index.service_context.
"""
if hasattr(self, "service_context"):
return self.service_context
if hasattr(self, "_service_context"):
return self._service_context
elif hasattr(self, "_index") and hasattr(self._index, "service_context"):
return self._index.service_context
return None
def _as_query_component(self, **kwargs: Any) -> QueryComponent:
"""Return a query component."""
return RetrieverComponent(retriever=self)
class RetrieverComponent(QueryComponent):
"""Retriever component."""
retriever: BaseRetriever = Field(..., description="Retriever")
class Config:
arbitrary_types_allowed = True
def set_callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set callback manager."""
self.retriever.callback_manager = callback_manager
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
# make sure input is a string
input["input"] = validate_and_convert_stringable(input["input"])
return input
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
output = self.retriever.retrieve(kwargs["input"])
return {"output": output}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component."""
output = await self.retriever.aretrieve(kwargs["input"])
return {"output": output}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
return InputKeys.from_keys({"input"})
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
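# Subclassing sketch (hedged, illustrative only): `_retrieve` is the single required
# hook; callbacks, recursive retrieval over IndexNode objects, and the query-pipeline
# component all come from BaseRetriever. A toy retriever that returns fixed nodes:
#
#     class StaticRetriever(BaseRetriever):
#         def __init__(self, nodes: List[NodeWithScore]) -> None:
#             super().__init__()
#             self._nodes = nodes
#
#         def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
#             return self._nodes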
| [
"llama_index.core.instrumentation.events.retrieval.RetrievalStartEvent",
"llama_index.core.instrumentation.events.retrieval.RetrievalEndEvent",
"llama_index.core.callbacks.base.CallbackManager",
"llama_index.core.base.query_pipeline.query.InputKeys.from_keys",
"llama_index.core.utils.print_text",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.instrumentation.get_dispatcher",
"llama_index.core.base.query_pipeline.query.OutputKeys.from_keys",
"llama_index.core.schema.TextNode",
"llama_index.core.schema.QueryBundle",
"llama_index.core.schema.NodeWithScore",
"llama_index.core.base.query_pipeline.query.validate_and_convert_stringable"
] | [((1092, 1127), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (1117, 1127), True, 'import llama_index.core.instrumentation as instrument\n'), ((11319, 11354), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever"""'}), "(..., description='Retriever')\n", (11324, 11354), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((11797, 11844), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['input']"], {}), "(input['input'])\n", (11828, 11844), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((12323, 12353), 'llama_index.core.base.query_pipeline.query.InputKeys.from_keys', 'InputKeys.from_keys', (["{'input'}"], {}), "({'input'})\n", (12342, 12353), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((12452, 12484), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (12472, 12484), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((1491, 1508), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1506, 1508), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((2409, 2539), 'llama_index.core.utils.print_text', 'print_text', (['f"""Retrieving from object {obj.__class__.__name__} with query {query_bundle.query_str}\n"""'], {'color': '"""llama_pink"""'}), "(\n f'Retrieving from object {obj.__class__.__name__} with query {query_bundle.query_str}\\n'\n , color='llama_pink')\n", (2419, 2539), False, 'from llama_index.core.utils import print_text\n'), ((8276, 8297), 'llama_index.core.instrumentation.events.retrieval.RetrievalStartEvent', 'RetrievalStartEvent', ([], {}), '()\n', (8295, 8297), False, 'from llama_index.core.instrumentation.events.retrieval import RetrievalEndEvent, RetrievalStartEvent\n'), ((8375, 8407), 'llama_index.core.schema.QueryBundle', 'QueryBundle', (['str_or_query_bundle'], {}), '(str_or_query_bundle)\n', (8386, 8407), False, 'from llama_index.core.schema import BaseNode, IndexNode, NodeWithScore, QueryBundle, QueryType, TextNode\n'), ((8984, 9003), 'llama_index.core.instrumentation.events.retrieval.RetrievalEndEvent', 'RetrievalEndEvent', ([], {}), '()\n', (9001, 9003), False, 'from llama_index.core.instrumentation.events.retrieval import RetrievalEndEvent, RetrievalStartEvent\n'), ((9198, 9219), 'llama_index.core.instrumentation.events.retrieval.RetrievalStartEvent', 'RetrievalStartEvent', ([], {}), '()\n', (9217, 9219), False, 'from llama_index.core.instrumentation.events.retrieval import RetrievalEndEvent, RetrievalStartEvent\n'), ((9297, 9329), 'llama_index.core.schema.QueryBundle', 'QueryBundle', (['str_or_query_bundle'], {}), '(str_or_query_bundle)\n', (9308, 9329), False, 'from llama_index.core.schema import BaseNode, IndexNode, NodeWithScore, QueryBundle, QueryType, TextNode\n'), ((9990, 10009), 'llama_index.core.instrumentation.events.retrieval.RetrievalEndEvent', 'RetrievalEndEvent', ([], {}), '()\n', (10007, 10009), False, 'from llama_index.core.instrumentation.events.retrieval import RetrievalEndEvent, RetrievalStartEvent\n'), 
((2705, 2741), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'obj', 'score': 'score'}), '(node=obj, score=score)\n', (2718, 2741), False, 'from llama_index.core.schema import BaseNode, IndexNode, NodeWithScore, QueryBundle, QueryType, TextNode\n'), ((4246, 4282), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'obj', 'score': 'score'}), '(node=obj, score=score)\n', (4259, 4282), False, 'from llama_index.core.schema import BaseNode, IndexNode, NodeWithScore, QueryBundle, QueryType, TextNode\n'), ((5852, 5958), 'llama_index.core.utils.print_text', 'print_text', (['f"""Retrieval entering {node.index_id}: {obj.__class__.__name__}\n"""'], {'color': '"""llama_turquoise"""'}), "(f'Retrieval entering {node.index_id}: {obj.__class__.__name__}\\n',\n color='llama_turquoise')\n", (5862, 5958), False, 'from llama_index.core.utils import print_text\n'), ((7079, 7185), 'llama_index.core.utils.print_text', 'print_text', (['f"""Retrieval entering {node.index_id}: {obj.__class__.__name__}\n"""'], {'color': '"""llama_turquoise"""'}), "(f'Retrieval entering {node.index_id}: {obj.__class__.__name__}\\n',\n color='llama_turquoise')\n", (7089, 7185), False, 'from llama_index.core.utils import print_text\n'), ((3794, 3822), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'result_output'}), '(text=result_output)\n', (3802, 3822), False, 'from llama_index.core.schema import BaseNode, IndexNode, NodeWithScore, QueryBundle, QueryType, TextNode\n'), ((5240, 5268), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'result_output'}), '(text=result_output)\n', (5248, 5268), False, 'from llama_index.core.schema import BaseNode, IndexNode, NodeWithScore, QueryBundle, QueryType, TextNode\n')] |
import dataclasses
import logging
from dataclasses import dataclass
from typing import Optional
from langchain.base_language import BaseLanguageModel
import llama_index
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser
from llama_index.node_parser.simple import SimpleNodeParser
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SimpleNodeParser.from_defaults(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata = dataclasses.replace(llm_metadata, context_window=context_window)
if num_output is not None:
llm_metadata = dataclasses.replace(llm_metadata, num_output=num_output)
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
node_parser: NodeParser
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[BaseLanguageModel] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[BaseEmbedding] = None,
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
llama_logger=llama_logger,
callback_manager=callback_manager,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
)
callback_manager = callback_manager or CallbackManager([])
if llm is not None:
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or LLMPredictor()
llm_predictor.callback_manager = callback_manager
# NOTE: the embed_model isn't used in all indices
embed_model = embed_model or OpenAIEmbedding()
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.get_llm_metadata(),
context_window=context_window,
num_output=num_output,
)
node_parser = node_parser or _get_default_node_parser(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[BaseLanguageModel] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[BaseEmbedding] = None,
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
if chunk_size_limit is not None and chunk_size is None:
            logger.warning(
                "chunk_size_limit is deprecated, please specify chunk_size instead"
            )
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm is not None:
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
llm_predictor.callback_manager = callback_manager
# NOTE: the embed_model isn't used in all indices
embed_model = embed_model or service_context.embed_model
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.get_llm_metadata(),
context_window=context_window,
num_output=num_output,
)
node_parser = node_parser or service_context.node_parser
if chunk_size is not None or chunk_overlap is not None:
node_parser = _get_default_node_parser(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
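# Usage sketch (hedged): typical construction using the node-parser kwargs documented in
# from_defaults above. The chunk sizes are illustrative, and an OpenAI API key is assumed
# to be configured for the default LLMPredictor/OpenAIEmbedding.
#
#     service_context = ServiceContext.from_defaults(chunk_size=512, chunk_overlap=20)
#     set_global_service_context(service_context)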
| [
"llama_index.langchain_helpers.chain_wrapper.LLMPredictor",
"llama_index.callbacks.base.CallbackManager",
"llama_index.node_parser.simple.SimpleNodeParser.from_defaults",
"llama_index.logger.LlamaLogger",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((709, 736), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (726, 736), False, 'import logging\n'), ((967, 1089), 'llama_index.node_parser.simple.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'callback_manager': 'callback_manager'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap, callback_manager=callback_manager)\n', (997, 1089), False, 'from llama_index.node_parser.simple import SimpleNodeParser\n'), ((1562, 1619), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1592, 1619), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((1375, 1439), 'dataclasses.replace', 'dataclasses.replace', (['llm_metadata'], {'context_window': 'context_window'}), '(llm_metadata, context_window=context_window)\n', (1394, 1439), False, 'import dataclasses\n'), ((1494, 1550), 'dataclasses.replace', 'dataclasses.replace', (['llm_metadata'], {'num_output': 'num_output'}), '(llm_metadata, num_output=num_output)\n', (1513, 1550), False, 'import dataclasses\n'), ((4762, 4781), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (4777, 4781), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((4958, 4979), 'llama_index.langchain_helpers.chain_wrapper.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (4970, 4979), False, 'from llama_index.langchain_helpers.chain_wrapper import LLMPredictor\n'), ((5021, 5035), 'llama_index.langchain_helpers.chain_wrapper.LLMPredictor', 'LLMPredictor', ([], {}), '()\n', (5033, 5035), False, 'from llama_index.langchain_helpers.chain_wrapper import LLMPredictor\n'), ((5190, 5207), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (5205, 5207), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((5718, 5731), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (5729, 5731), False, 'from llama_index.logger import LlamaLogger\n'), ((7437, 7458), 'llama_index.langchain_helpers.chain_wrapper.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (7449, 7458), False, 'from llama_index.langchain_helpers.chain_wrapper import LLMPredictor\n')] |
from __future__ import annotations
import os
try:
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage
)
except ImportError:
    pass
from dataclasses import dataclass
def get_or_create_index_local(persist_dir: str = './storage', documents_dir: str = "data"):
    """Load a persisted vector index from persist_dir, or build it from documents_dir and persist it."""
    if not os.path.exists(persist_dir):
        documents = SimpleDirectoryReader(documents_dir).load_data()
        index = VectorStoreIndex.from_documents(documents)
        index.storage_context.persist(persist_dir=persist_dir)
    else:
        storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
        index = load_index_from_storage(storage_context)
    return index
@dataclass
class LocalDirRag:
    """Minimal retrieval-augmented generation helper over a local directory of documents."""
    documents_dir: str = "data"
    persist_dir: str = './storage'
def __post_init__(self):
        try:
            import openai
            openai.api_key = os.environ.get("OPENAI_API_KEY")
        except ImportError:
            raise ValueError("openai not installed. Please install it using pip install openai")
        try:
            import llama_index
        except ImportError:
            raise ValueError("llama_index not installed. Please install it using pip install llama_index")
        if openai.api_key is None:
            raise ValueError("OpenAI API key not found. Please set it as an environment variable OPENAI_API_KEY")
        if len(openai.api_key) < 5:
            raise ValueError("OpenAI API key not in correct format. Please set it as an environment variable OPENAI_API_KEY")
        self.index = get_or_create_index_local(
            persist_dir=self.persist_dir, documents_dir=self.documents_dir
        )
    def ask(self, query: str):
engine = self.index.as_query_engine()
return engine.query(query)
@property
def chatbot(self):
engine = self.index.as_chat_engine()
return RagChatBot(engine)
    def __call__(self, query: str):
        return self.ask(query).response
class RagChatBot:
    """Thin wrapper that exposes a chat engine as a callable."""
    def __init__(self, engine):
        self.engine = engine
    def __call__(self, query):
        return self.engine.chat(query)
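# Usage sketch (hedged): assumes OPENAI_API_KEY is exported and ./data contains the
# documents to index; the first run builds ./storage, subsequent runs reload it.
#
#     rag = LocalDirRag(documents_dir="data", persist_dir="./storage")
#     print(rag("What do these documents cover?"))
#     bot = rag.chatbot
#     print(bot("Summarize the main topics."))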
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.SimpleDirectoryReader"
] | [((350, 377), 'os.path.exists', 'os.path.exists', (['persist_dir'], {}), '(persist_dir)\n', (364, 377), False, 'import os\n'), ((464, 506), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (495, 506), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((608, 661), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (636, 661), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((678, 718), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (701, 718), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((399, 435), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['documents_dir'], {}), '(documents_dir)\n', (420, 435), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n')] |
import os
from abc import abstractmethod
from collections import deque
from typing import Any, Deque, Dict, List, Optional, Union, cast
from llama_index.core.agent.types import (
BaseAgent,
BaseAgentWorker,
Task,
TaskStep,
TaskStepOutput,
)
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.callbacks import (
CallbackManager,
CBEventType,
EventPayload,
trace_method,
)
from llama_index.core.chat_engine.types import (
AGENT_CHAT_RESPONSE_TYPE,
AgentChatResponse,
ChatResponseMode,
StreamingAgentChatResponse,
)
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.llms.llm import LLM
from llama_index.core.memory import BaseMemory, ChatMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.tools.types import BaseTool
from llama_index.core.instrumentation.events.agent import (
AgentRunStepEndEvent,
AgentRunStepStartEvent,
AgentChatWithStepStartEvent,
AgentChatWithStepEndEvent,
)
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
class BaseAgentRunner(BaseAgent):
"""Base agent runner."""
@abstractmethod
def create_task(self, input: str, **kwargs: Any) -> Task:
"""Create task."""
@abstractmethod
def delete_task(
self,
task_id: str,
) -> None:
"""Delete task.
NOTE: this will not delete any previous executions from memory.
"""
@abstractmethod
def list_tasks(self, **kwargs: Any) -> List[Task]:
"""List tasks."""
@abstractmethod
def get_task(self, task_id: str, **kwargs: Any) -> Task:
"""Get task."""
@abstractmethod
def get_upcoming_steps(self, task_id: str, **kwargs: Any) -> List[TaskStep]:
"""Get upcoming steps."""
@abstractmethod
def get_completed_steps(self, task_id: str, **kwargs: Any) -> List[TaskStepOutput]:
"""Get completed steps."""
def get_completed_step(
self, task_id: str, step_id: str, **kwargs: Any
) -> TaskStepOutput:
"""Get completed step."""
        # call get_completed_steps, and then find the right step output
completed_steps = self.get_completed_steps(task_id, **kwargs)
for step_output in completed_steps:
if step_output.task_step.step_id == step_id:
return step_output
raise ValueError(f"Could not find step_id: {step_id}")
@abstractmethod
def run_step(
self,
task_id: str,
input: Optional[str] = None,
step: Optional[TaskStep] = None,
**kwargs: Any,
) -> TaskStepOutput:
"""Run step."""
@abstractmethod
async def arun_step(
self,
task_id: str,
input: Optional[str] = None,
step: Optional[TaskStep] = None,
**kwargs: Any,
) -> TaskStepOutput:
"""Run step (async)."""
@abstractmethod
def stream_step(
self,
task_id: str,
input: Optional[str] = None,
step: Optional[TaskStep] = None,
**kwargs: Any,
) -> TaskStepOutput:
"""Run step (stream)."""
@abstractmethod
async def astream_step(
self,
task_id: str,
input: Optional[str] = None,
step: Optional[TaskStep] = None,
**kwargs: Any,
) -> TaskStepOutput:
"""Run step (async stream)."""
@abstractmethod
def finalize_response(
self,
task_id: str,
step_output: Optional[TaskStepOutput] = None,
) -> AGENT_CHAT_RESPONSE_TYPE:
"""Finalize response."""
@abstractmethod
def undo_step(self, task_id: str) -> None:
"""Undo previous step."""
raise NotImplementedError("undo_step not implemented")
def validate_step_from_args(
task_id: str, input: Optional[str] = None, step: Optional[Any] = None, **kwargs: Any
) -> Optional[TaskStep]:
"""Validate step from args."""
if step is not None:
if input is not None:
raise ValueError("Cannot specify both `step` and `input`")
if not isinstance(step, TaskStep):
raise ValueError(f"step must be TaskStep: {step}")
return step
else:
return None
class TaskState(BaseModel):
"""Task state."""
task: Task = Field(..., description="Task.")
step_queue: Deque[TaskStep] = Field(
default_factory=deque, description="Task step queue."
)
completed_steps: List[TaskStepOutput] = Field(
default_factory=list, description="Completed step outputs."
)
class AgentState(BaseModel):
"""Agent state."""
task_dict: Dict[str, TaskState] = Field(
default_factory=dict, description="Task dictionary."
)
def get_task(self, task_id: str) -> Task:
"""Get task state."""
return self.task_dict[task_id].task
def get_completed_steps(self, task_id: str) -> List[TaskStepOutput]:
"""Get completed steps."""
return self.task_dict[task_id].completed_steps
def get_step_queue(self, task_id: str) -> Deque[TaskStep]:
"""Get step queue."""
return self.task_dict[task_id].step_queue
def reset(self) -> None:
"""Reset."""
self.task_dict = {}
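# Hedged note: AgentState is a plain registry keyed by task_id; each TaskState couples a
# Task with its pending step queue and completed step outputs. Illustrative only:
#
#     state = AgentState()
#     state.task_dict[task.task_id] = TaskState(task=task, step_queue=deque([first_step]))
#     state.get_step_queue(task.task_id)  # -> deque of TaskStep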
class AgentRunner(BaseAgentRunner):
"""Agent runner.
Top-level agent orchestrator that can create tasks, run each step in a task,
or run a task e2e. Stores state and keeps track of tasks.
Args:
agent_worker (BaseAgentWorker): step executor
chat_history (Optional[List[ChatMessage]], optional): chat history. Defaults to None.
state (Optional[AgentState], optional): agent state. Defaults to None.
memory (Optional[BaseMemory], optional): memory. Defaults to None.
llm (Optional[LLM], optional): LLM. Defaults to None.
callback_manager (Optional[CallbackManager], optional): callback manager. Defaults to None.
init_task_state_kwargs (Optional[dict], optional): init task state kwargs. Defaults to None.
"""
# # TODO: implement this in Pydantic
def __init__(
self,
agent_worker: BaseAgentWorker,
chat_history: Optional[List[ChatMessage]] = None,
state: Optional[AgentState] = None,
memory: Optional[BaseMemory] = None,
llm: Optional[LLM] = None,
callback_manager: Optional[CallbackManager] = None,
init_task_state_kwargs: Optional[dict] = None,
delete_task_on_finish: bool = False,
default_tool_choice: str = "auto",
verbose: bool = False,
) -> None:
"""Initialize."""
self.agent_worker = agent_worker
self.state = state or AgentState()
self.memory = memory or ChatMemoryBuffer.from_defaults(chat_history, llm=llm)
# get and set callback manager
if callback_manager is not None:
self.agent_worker.set_callback_manager(callback_manager)
self.callback_manager = callback_manager
else:
# TODO: This is *temporary*
# Stopgap before having a callback on the BaseAgentWorker interface.
# Doing that requires a bit more refactoring to make sure existing code
# doesn't break.
if hasattr(self.agent_worker, "callback_manager"):
self.callback_manager = (
self.agent_worker.callback_manager or CallbackManager()
)
else:
self.callback_manager = CallbackManager()
self.init_task_state_kwargs = init_task_state_kwargs or {}
self.delete_task_on_finish = delete_task_on_finish
self.default_tool_choice = default_tool_choice
self.verbose = verbose
@staticmethod
def from_llm(
tools: Optional[List[BaseTool]] = None,
llm: Optional[LLM] = None,
**kwargs: Any,
) -> "AgentRunner":
from llama_index.core.agent import ReActAgent
if os.getenv("IS_TESTING"):
return ReActAgent.from_tools(
tools=tools,
llm=llm,
**kwargs,
)
try:
from llama_index.llms.openai import OpenAI # pants: no-infer-dep
from llama_index.llms.openai.utils import (
is_function_calling_model,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"`llama-index-llms-openai` package not found. Please "
"install by running `pip install llama-index-llms-openai`."
)
if isinstance(llm, OpenAI) and is_function_calling_model(llm.model):
from llama_index.agent.openai import OpenAIAgent # pants: no-infer-dep
return OpenAIAgent.from_tools(
tools=tools,
llm=llm,
**kwargs,
)
else:
return ReActAgent.from_tools(
tools=tools,
llm=llm,
**kwargs,
)
@property
def chat_history(self) -> List[ChatMessage]:
return self.memory.get_all()
def reset(self) -> None:
self.memory.reset()
self.state.reset()
def create_task(self, input: str, **kwargs: Any) -> Task:
"""Create task."""
if not self.init_task_state_kwargs:
extra_state = kwargs.pop("extra_state", {})
else:
if "extra_state" in kwargs:
raise ValueError(
"Cannot specify both `extra_state` and `init_task_state_kwargs`"
)
else:
extra_state = self.init_task_state_kwargs
callback_manager = kwargs.pop("callback_manager", self.callback_manager)
task = Task(
input=input,
memory=self.memory,
extra_state=extra_state,
callback_manager=callback_manager,
**kwargs,
)
# # put input into memory
# self.memory.put(ChatMessage(content=input, role=MessageRole.USER))
# get initial step from task, and put it in the step queue
initial_step = self.agent_worker.initialize_step(task)
task_state = TaskState(
task=task,
step_queue=deque([initial_step]),
)
# add it to state
self.state.task_dict[task.task_id] = task_state
return task
def delete_task(
self,
task_id: str,
) -> None:
"""Delete task.
NOTE: this will not delete any previous executions from memory.
"""
self.state.task_dict.pop(task_id)
def list_tasks(self, **kwargs: Any) -> List[Task]:
"""List tasks."""
return list(self.state.task_dict.values())
def get_task(self, task_id: str, **kwargs: Any) -> Task:
"""Get task."""
return self.state.get_task(task_id)
def get_upcoming_steps(self, task_id: str, **kwargs: Any) -> List[TaskStep]:
"""Get upcoming steps."""
return list(self.state.get_step_queue(task_id))
def get_completed_steps(self, task_id: str, **kwargs: Any) -> List[TaskStepOutput]:
"""Get completed steps."""
return self.state.get_completed_steps(task_id)
@dispatcher.span
def _run_step(
self,
task_id: str,
step: Optional[TaskStep] = None,
input: Optional[str] = None,
mode: ChatResponseMode = ChatResponseMode.WAIT,
**kwargs: Any,
) -> TaskStepOutput:
"""Execute step."""
dispatcher.event(AgentRunStepStartEvent())
task = self.state.get_task(task_id)
step_queue = self.state.get_step_queue(task_id)
step = step or step_queue.popleft()
if input is not None:
step.input = input
if self.verbose:
print(f"> Running step {step.step_id}. Step input: {step.input}")
# TODO: figure out if you can dynamically swap in different step executors
        # not clear when you would do that, but it is theoretically possible
if mode == ChatResponseMode.WAIT:
cur_step_output = self.agent_worker.run_step(step, task, **kwargs)
elif mode == ChatResponseMode.STREAM:
cur_step_output = self.agent_worker.stream_step(step, task, **kwargs)
else:
raise ValueError(f"Invalid mode: {mode}")
# append cur_step_output next steps to queue
next_steps = cur_step_output.next_steps
step_queue.extend(next_steps)
# add cur_step_output to completed steps
completed_steps = self.state.get_completed_steps(task_id)
completed_steps.append(cur_step_output)
dispatcher.event(AgentRunStepEndEvent())
return cur_step_output
async def _arun_step(
self,
task_id: str,
step: Optional[TaskStep] = None,
input: Optional[str] = None,
mode: ChatResponseMode = ChatResponseMode.WAIT,
**kwargs: Any,
) -> TaskStepOutput:
"""Execute step."""
task = self.state.get_task(task_id)
step_queue = self.state.get_step_queue(task_id)
step = step or step_queue.popleft()
if input is not None:
step.input = input
if self.verbose:
print(f"> Running step {step.step_id}. Step input: {step.input}")
# TODO: figure out if you can dynamically swap in different step executors
        # not clear when you would do that, but it is theoretically possible
if mode == ChatResponseMode.WAIT:
cur_step_output = await self.agent_worker.arun_step(step, task, **kwargs)
elif mode == ChatResponseMode.STREAM:
cur_step_output = await self.agent_worker.astream_step(step, task, **kwargs)
else:
raise ValueError(f"Invalid mode: {mode}")
# append cur_step_output next steps to queue
next_steps = cur_step_output.next_steps
step_queue.extend(next_steps)
# add cur_step_output to completed steps
completed_steps = self.state.get_completed_steps(task_id)
completed_steps.append(cur_step_output)
return cur_step_output
def run_step(
self,
task_id: str,
input: Optional[str] = None,
step: Optional[TaskStep] = None,
**kwargs: Any,
) -> TaskStepOutput:
"""Run step."""
step = validate_step_from_args(task_id, input, step, **kwargs)
return self._run_step(
task_id, step, input=input, mode=ChatResponseMode.WAIT, **kwargs
)
async def arun_step(
self,
task_id: str,
input: Optional[str] = None,
step: Optional[TaskStep] = None,
**kwargs: Any,
) -> TaskStepOutput:
"""Run step (async)."""
step = validate_step_from_args(task_id, input, step, **kwargs)
return await self._arun_step(
task_id, step, input=input, mode=ChatResponseMode.WAIT, **kwargs
)
def stream_step(
self,
task_id: str,
input: Optional[str] = None,
step: Optional[TaskStep] = None,
**kwargs: Any,
) -> TaskStepOutput:
"""Run step (stream)."""
step = validate_step_from_args(task_id, input, step, **kwargs)
return self._run_step(
task_id, step, input=input, mode=ChatResponseMode.STREAM, **kwargs
)
async def astream_step(
self,
task_id: str,
input: Optional[str] = None,
step: Optional[TaskStep] = None,
**kwargs: Any,
) -> TaskStepOutput:
"""Run step (async stream)."""
step = validate_step_from_args(task_id, input, step, **kwargs)
return await self._arun_step(
task_id, step, input=input, mode=ChatResponseMode.STREAM, **kwargs
)
def finalize_response(
self,
task_id: str,
step_output: Optional[TaskStepOutput] = None,
) -> AGENT_CHAT_RESPONSE_TYPE:
"""Finalize response."""
if step_output is None:
step_output = self.state.get_completed_steps(task_id)[-1]
if not step_output.is_last:
raise ValueError(
"finalize_response can only be called on the last step output"
)
if not isinstance(
step_output.output,
(AgentChatResponse, StreamingAgentChatResponse),
):
raise ValueError(
"When `is_last` is True, cur_step_output.output must be "
f"AGENT_CHAT_RESPONSE_TYPE: {step_output.output}"
)
# finalize task
self.agent_worker.finalize_task(self.state.get_task(task_id))
if self.delete_task_on_finish:
self.delete_task(task_id)
return cast(AGENT_CHAT_RESPONSE_TYPE, step_output.output)
@dispatcher.span
def _chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
tool_choice: Union[str, dict] = "auto",
mode: ChatResponseMode = ChatResponseMode.WAIT,
) -> AGENT_CHAT_RESPONSE_TYPE:
"""Chat with step executor."""
if chat_history is not None:
self.memory.set(chat_history)
task = self.create_task(message)
result_output = None
dispatcher.event(AgentChatWithStepStartEvent())
while True:
# pass step queue in as argument, assume step executor is stateless
cur_step_output = self._run_step(
task.task_id, mode=mode, tool_choice=tool_choice
)
if cur_step_output.is_last:
result_output = cur_step_output
break
# ensure tool_choice does not cause endless loops
tool_choice = "auto"
result = self.finalize_response(
task.task_id,
result_output,
)
dispatcher.event(AgentChatWithStepEndEvent())
return result
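    # The loop above is the engine behind the public `chat()`/`stream_chat()`
    # entry points: one task per user message, steps executed until `is_last`,
    # then `finalize_response`. A minimal call (illustrative only, assuming a
    # constructed `runner`):
    #
    #     response = runner.chat("What do the retrieved passages conclude?")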
async def _achat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
tool_choice: Union[str, dict] = "auto",
mode: ChatResponseMode = ChatResponseMode.WAIT,
) -> AGENT_CHAT_RESPONSE_TYPE:
"""Chat with step executor."""
if chat_history is not None:
self.memory.set(chat_history)
task = self.create_task(message)
result_output = None
while True:
# pass step queue in as argument, assume step executor is stateless
cur_step_output = await self._arun_step(
task.task_id, mode=mode, tool_choice=tool_choice
)
if cur_step_output.is_last:
result_output = cur_step_output
break
# ensure tool_choice does not cause endless loops
tool_choice = "auto"
return self.finalize_response(
task.task_id,
result_output,
)
@trace_method("chat")
def chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
tool_choice: Optional[Union[str, dict]] = None,
) -> AgentChatResponse:
        # override tool_choice if provided as input.
if tool_choice is None:
tool_choice = self.default_tool_choice
with self.callback_manager.event(
CBEventType.AGENT_STEP,
payload={EventPayload.MESSAGES: [message]},
) as e:
chat_response = self._chat(
message=message,
chat_history=chat_history,
tool_choice=tool_choice,
mode=ChatResponseMode.WAIT,
)
assert isinstance(chat_response, AgentChatResponse)
e.on_end(payload={EventPayload.RESPONSE: chat_response})
return chat_response
@trace_method("chat")
async def achat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
tool_choice: Optional[Union[str, dict]] = None,
) -> AgentChatResponse:
        # override tool_choice if provided as input.
if tool_choice is None:
tool_choice = self.default_tool_choice
with self.callback_manager.event(
CBEventType.AGENT_STEP,
payload={EventPayload.MESSAGES: [message]},
) as e:
chat_response = await self._achat(
message=message,
chat_history=chat_history,
tool_choice=tool_choice,
mode=ChatResponseMode.WAIT,
)
assert isinstance(chat_response, AgentChatResponse)
e.on_end(payload={EventPayload.RESPONSE: chat_response})
return chat_response
@dispatcher.span
def stream_chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
tool_choice: Optional[Union[str, dict]] = None,
) -> StreamingAgentChatResponse:
        # override tool_choice if provided as input.
if tool_choice is None:
tool_choice = self.default_tool_choice
chat_response = self._chat(
message=message,
chat_history=chat_history,
tool_choice=tool_choice,
mode=ChatResponseMode.STREAM,
)
assert isinstance(chat_response, StreamingAgentChatResponse)
return chat_response
@trace_method("chat")
async def astream_chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
tool_choice: Optional[Union[str, dict]] = None,
) -> StreamingAgentChatResponse:
        # override tool_choice if provided as input.
if tool_choice is None:
tool_choice = self.default_tool_choice
with self.callback_manager.event(
CBEventType.AGENT_STEP,
payload={EventPayload.MESSAGES: [message]},
) as e:
chat_response = await self._achat(
message, chat_history, tool_choice, mode=ChatResponseMode.STREAM
)
assert isinstance(chat_response, StreamingAgentChatResponse)
e.on_end(payload={EventPayload.RESPONSE: chat_response})
return chat_response
def undo_step(self, task_id: str) -> None:
"""Undo previous step."""
raise NotImplementedError("undo_step not implemented")
| [
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.agent.ReActAgent.from_tools",
"llama_index.core.agent.types.Task",
"llama_index.llms.openai.utils.is_function_calling_model",
"llama_index.agent.openai.OpenAIAgent.from_tools",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.instrumentation.get_dispatcher",
"llama_index.core.instrumentation.events.agent.AgentChatWithStepEndEvent",
"llama_index.core.instrumentation.events.agent.AgentRunStepEndEvent",
"llama_index.core.instrumentation.events.agent.AgentRunStepStartEvent",
"llama_index.core.instrumentation.events.agent.AgentChatWithStepStartEvent",
"llama_index.core.callbacks.trace_method",
"llama_index.core.memory.ChatMemoryBuffer.from_defaults"
] | [((1114, 1149), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (1139, 1149), True, 'import llama_index.core.instrumentation as instrument\n'), ((4349, 4380), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task."""'}), "(..., description='Task.')\n", (4354, 4380), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4415, 4475), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'deque', 'description': '"""Task step queue."""'}), "(default_factory=deque, description='Task step queue.')\n", (4420, 4475), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4534, 4600), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': '"""Completed step outputs."""'}), "(default_factory=list, description='Completed step outputs.')\n", (4539, 4600), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4708, 4767), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Task dictionary."""'}), "(default_factory=dict, description='Task dictionary.')\n", (4713, 4767), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((18936, 18956), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (18948, 18956), False, 'from llama_index.core.callbacks import CallbackManager, CBEventType, EventPayload, trace_method\n'), ((19818, 19838), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (19830, 19838), False, 'from llama_index.core.callbacks import CallbackManager, CBEventType, EventPayload, trace_method\n'), ((21371, 21391), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (21383, 21391), False, 'from llama_index.core.callbacks import CallbackManager, CBEventType, EventPayload, trace_method\n'), ((7990, 8013), 'os.getenv', 'os.getenv', (['"""IS_TESTING"""'], {}), "('IS_TESTING')\n", (7999, 8013), False, 'import os\n'), ((9789, 9900), 'llama_index.core.agent.types.Task', 'Task', ([], {'input': 'input', 'memory': 'self.memory', 'extra_state': 'extra_state', 'callback_manager': 'callback_manager'}), '(input=input, memory=self.memory, extra_state=extra_state,\n callback_manager=callback_manager, **kwargs)\n', (9793, 9900), False, 'from llama_index.core.agent.types import BaseAgent, BaseAgentWorker, Task, TaskStep, TaskStepOutput\n'), ((16764, 16814), 'typing.cast', 'cast', (['AGENT_CHAT_RESPONSE_TYPE', 'step_output.output'], {}), '(AGENT_CHAT_RESPONSE_TYPE, step_output.output)\n', (16768, 16814), False, 'from typing import Any, Deque, Dict, List, Optional, Union, cast\n'), ((6765, 6818), 'llama_index.core.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', (['chat_history'], {'llm': 'llm'}), '(chat_history, llm=llm)\n', (6795, 6818), False, 'from llama_index.core.memory import BaseMemory, ChatMemoryBuffer\n'), ((8034, 8087), 'llama_index.core.agent.ReActAgent.from_tools', 'ReActAgent.from_tools', ([], {'tools': 'tools', 'llm': 'llm'}), '(tools=tools, llm=llm, **kwargs)\n', (8055, 8087), False, 'from llama_index.core.agent import ReActAgent\n'), ((8639, 8675), 'llama_index.llms.openai.utils.is_function_calling_model', 'is_function_calling_model', (['llm.model'], {}), '(llm.model)\n', (8664, 8675), False, 'from llama_index.llms.openai.utils import 
is_function_calling_model\n'), ((8781, 8835), 'llama_index.agent.openai.OpenAIAgent.from_tools', 'OpenAIAgent.from_tools', ([], {'tools': 'tools', 'llm': 'llm'}), '(tools=tools, llm=llm, **kwargs)\n', (8803, 8835), False, 'from llama_index.agent.openai import OpenAIAgent\n'), ((8932, 8985), 'llama_index.core.agent.ReActAgent.from_tools', 'ReActAgent.from_tools', ([], {'tools': 'tools', 'llm': 'llm'}), '(tools=tools, llm=llm, **kwargs)\n', (8953, 8985), False, 'from llama_index.core.agent import ReActAgent\n'), ((11575, 11599), 'llama_index.core.instrumentation.events.agent.AgentRunStepStartEvent', 'AgentRunStepStartEvent', ([], {}), '()\n', (11597, 11599), False, 'from llama_index.core.instrumentation.events.agent import AgentRunStepEndEvent, AgentRunStepStartEvent, AgentChatWithStepStartEvent, AgentChatWithStepEndEvent\n'), ((12710, 12732), 'llama_index.core.instrumentation.events.agent.AgentRunStepEndEvent', 'AgentRunStepEndEvent', ([], {}), '()\n', (12730, 12732), False, 'from llama_index.core.instrumentation.events.agent import AgentRunStepEndEvent, AgentRunStepStartEvent, AgentChatWithStepStartEvent, AgentChatWithStepEndEvent\n'), ((17299, 17328), 'llama_index.core.instrumentation.events.agent.AgentChatWithStepStartEvent', 'AgentChatWithStepStartEvent', ([], {}), '()\n', (17326, 17328), False, 'from llama_index.core.instrumentation.events.agent import AgentRunStepEndEvent, AgentRunStepStartEvent, AgentChatWithStepStartEvent, AgentChatWithStepEndEvent\n'), ((17892, 17919), 'llama_index.core.instrumentation.events.agent.AgentChatWithStepEndEvent', 'AgentChatWithStepEndEvent', ([], {}), '()\n', (17917, 17919), False, 'from llama_index.core.instrumentation.events.agent import AgentRunStepEndEvent, AgentRunStepStartEvent, AgentChatWithStepStartEvent, AgentChatWithStepEndEvent\n'), ((7527, 7544), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (7542, 7544), False, 'from llama_index.core.callbacks import CallbackManager, CBEventType, EventPayload, trace_method\n'), ((10288, 10309), 'collections.deque', 'deque', (['[initial_step]'], {}), '([initial_step])\n', (10293, 10309), False, 'from collections import deque\n'), ((7433, 7450), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (7448, 7450), False, 'from llama_index.core.callbacks import CallbackManager, CBEventType, EventPayload, trace_method\n')] |
from typing import Any, List, Optional, Sequence
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.llms.llm import LLM
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts import BasePromptTemplate
from llama_index.core.prompts.mixin import PromptMixinType
from llama_index.core.response_synthesizers import (
BaseSynthesizer,
ResponseMode,
get_response_synthesizer,
)
from llama_index.core.schema import NodeWithScore, QueryBundle
from llama_index.core.service_context import ServiceContext
from llama_index.core.settings import (
Settings,
callback_manager_from_settings_or_context,
llm_from_settings_or_context,
)
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
class RetrieverQueryEngine(BaseQueryEngine):
"""Retriever query engine.
Args:
retriever (BaseRetriever): A retriever object.
response_synthesizer (Optional[BaseSynthesizer]): A BaseSynthesizer
object.
callback_manager (Optional[CallbackManager]): A callback manager.
"""
def __init__(
self,
retriever: BaseRetriever,
response_synthesizer: Optional[BaseSynthesizer] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
callback_manager: Optional[CallbackManager] = None,
) -> None:
self._retriever = retriever
self._response_synthesizer = response_synthesizer or get_response_synthesizer(
llm=llm_from_settings_or_context(Settings, retriever.get_service_context()),
callback_manager=callback_manager
or callback_manager_from_settings_or_context(
Settings, retriever.get_service_context()
),
)
self._node_postprocessors = node_postprocessors or []
callback_manager = (
callback_manager or self._response_synthesizer.callback_manager
)
for node_postprocessor in self._node_postprocessors:
node_postprocessor.callback_manager = callback_manager
super().__init__(callback_manager=callback_manager)
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {"response_synthesizer": self._response_synthesizer}
@classmethod
def from_args(
cls,
retriever: BaseRetriever,
llm: Optional[LLM] = None,
response_synthesizer: Optional[BaseSynthesizer] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
# response synthesizer args
response_mode: ResponseMode = ResponseMode.COMPACT,
text_qa_template: Optional[BasePromptTemplate] = None,
refine_template: Optional[BasePromptTemplate] = None,
summary_template: Optional[BasePromptTemplate] = None,
simple_template: Optional[BasePromptTemplate] = None,
output_cls: Optional[BaseModel] = None,
use_async: bool = False,
streaming: bool = False,
# deprecated
service_context: Optional[ServiceContext] = None,
**kwargs: Any,
) -> "RetrieverQueryEngine":
"""Initialize a RetrieverQueryEngine object.".
Args:
retriever (BaseRetriever): A retriever object.
service_context (Optional[ServiceContext]): A ServiceContext object.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): A list of
node postprocessors.
verbose (bool): Whether to print out debug info.
response_mode (ResponseMode): A ResponseMode object.
text_qa_template (Optional[BasePromptTemplate]): A BasePromptTemplate
object.
refine_template (Optional[BasePromptTemplate]): A BasePromptTemplate object.
simple_template (Optional[BasePromptTemplate]): A BasePromptTemplate object.
use_async (bool): Whether to use async.
streaming (bool): Whether to use streaming.
optimizer (Optional[BaseTokenUsageOptimizer]): A BaseTokenUsageOptimizer
object.
"""
llm = llm or llm_from_settings_or_context(Settings, service_context)
response_synthesizer = response_synthesizer or get_response_synthesizer(
llm=llm,
service_context=service_context,
text_qa_template=text_qa_template,
refine_template=refine_template,
summary_template=summary_template,
simple_template=simple_template,
response_mode=response_mode,
output_cls=output_cls,
use_async=use_async,
streaming=streaming,
)
callback_manager = callback_manager_from_settings_or_context(
Settings, service_context
)
return cls(
retriever=retriever,
response_synthesizer=response_synthesizer,
callback_manager=callback_manager,
node_postprocessors=node_postprocessors,
)
def _apply_node_postprocessors(
self, nodes: List[NodeWithScore], query_bundle: QueryBundle
) -> List[NodeWithScore]:
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return nodes
def retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = self._retriever.retrieve(query_bundle)
return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
async def aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = await self._retriever.aretrieve(query_bundle)
return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
def with_retriever(self, retriever: BaseRetriever) -> "RetrieverQueryEngine":
return RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=self._response_synthesizer,
callback_manager=self.callback_manager,
node_postprocessors=self._node_postprocessors,
)
def synthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
return self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
additional_source_nodes=additional_source_nodes,
)
async def asynthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
return await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
additional_source_nodes=additional_source_nodes,
)
@dispatcher.span
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
nodes = self.retrieve(query_bundle)
response = self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
@dispatcher.span
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
nodes = await self.aretrieve(query_bundle)
response = await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
@property
def retriever(self) -> BaseRetriever:
"""Get the retriever object."""
return self._retriever
| [
"llama_index.core.response_synthesizers.get_response_synthesizer",
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.instrumentation.get_dispatcher"
] | [((1112, 1147), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (1137, 1147), True, 'import llama_index.core.instrumentation as instrument\n'), ((5090, 5158), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (5131, 5158), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((4522, 4577), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (4550, 4577), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((4634, 4946), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {'llm': 'llm', 'service_context': 'service_context', 'text_qa_template': 'text_qa_template', 'refine_template': 'refine_template', 'summary_template': 'summary_template', 'simple_template': 'simple_template', 'response_mode': 'response_mode', 'output_cls': 'output_cls', 'use_async': 'use_async', 'streaming': 'streaming'}), '(llm=llm, service_context=service_context,\n text_qa_template=text_qa_template, refine_template=refine_template,\n summary_template=summary_template, simple_template=simple_template,\n response_mode=response_mode, output_cls=output_cls, use_async=use_async,\n streaming=streaming)\n', (4658, 4946), False, 'from llama_index.core.response_synthesizers import BaseSynthesizer, ResponseMode, get_response_synthesizer\n')] |
from typing import Any
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
from llama_index.core.callbacks.simple_llm_handler import SimpleLLMHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index.core
llama_index.core.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
try:
from llama_index.callbacks.wandb import (
WandbCallbackHandler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"WandbCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-wandb`"
)
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
try:
from llama_index.callbacks.openinference import (
OpenInferenceCallbackHandler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"OpenInferenceCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-openinference`"
)
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
try:
from llama_index.callbacks.arize_phoenix import (
arize_phoenix_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"ArizePhoenixCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-arize-phoenix`"
)
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
try:
from llama_index.callbacks.honeyhive import (
honeyhive_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"HoneyHiveCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-honeyhive`"
)
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
try:
from llama_index.callbacks.promptlayer import (
PromptLayerHandler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"PromptLayerHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-promptlayer`"
)
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
try:
from llama_index.callbacks.deepeval import (
deepeval_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"DeepEvalCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-deepeval`"
)
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
elif eval_mode == "argilla":
try:
from llama_index.callbacks.argilla import (
argilla_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"ArgillaCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-argilla`"
)
handler = argilla_callback_handler(**eval_params)
elif eval_mode == "langfuse":
try:
from llama_index.callbacks.langfuse import (
langfuse_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"LangfuseCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-langfuse`"
)
handler = langfuse_callback_handler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
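# Illustrative use of the helpers above (added commentary, not part of the
# original source). The "simple" mode needs no extra package; the other modes
# require their matching `llama-index-callbacks-*` integration, e.g.:
#
#     set_global_handler("simple")
#     # assuming `llama-index-callbacks-wandb` is installed:
#     # set_global_handler("wandb", run_args={"project": "my-project"})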
| [
"llama_index.callbacks.openinference.OpenInferenceCallbackHandler",
"llama_index.callbacks.promptlayer.PromptLayerHandler",
"llama_index.callbacks.deepeval.deepeval_callback_handler",
"llama_index.callbacks.arize_phoenix.arize_phoenix_callback_handler",
"llama_index.callbacks.wandb.WandbCallbackHandler",
"llama_index.callbacks.honeyhive.honeyhive_callback_handler",
"llama_index.core.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.argilla.argilla_callback_handler",
"llama_index.callbacks.langfuse.langfuse_callback_handler"
] | [((941, 976), 'llama_index.callbacks.wandb.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (961, 976), False, 'from llama_index.callbacks.wandb import WandbCallbackHandler\n'), ((1424, 1467), 'llama_index.callbacks.openinference.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1452, 1467), False, 'from llama_index.callbacks.openinference import OpenInferenceCallbackHandler\n'), ((1916, 1961), 'llama_index.callbacks.arize_phoenix.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1946, 1961), False, 'from llama_index.callbacks.arize_phoenix import arize_phoenix_callback_handler\n'), ((2390, 2431), 'llama_index.callbacks.honeyhive.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (2416, 2431), False, 'from llama_index.callbacks.honeyhive import honeyhive_callback_handler\n'), ((2852, 2885), 'llama_index.callbacks.promptlayer.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (2870, 2885), False, 'from llama_index.callbacks.promptlayer import PromptLayerHandler\n'), ((3309, 3349), 'llama_index.callbacks.deepeval.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (3334, 3349), False, 'from llama_index.callbacks.deepeval import deepeval_callback_handler\n'), ((3400, 3431), 'llama_index.core.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (3416, 3431), False, 'from llama_index.core.callbacks.simple_llm_handler import SimpleLLMHandler\n'), ((3850, 3889), 'llama_index.callbacks.argilla.argilla_callback_handler', 'argilla_callback_handler', ([], {}), '(**eval_params)\n', (3874, 3889), False, 'from llama_index.callbacks.argilla import argilla_callback_handler\n'), ((4313, 4353), 'llama_index.callbacks.langfuse.langfuse_callback_handler', 'langfuse_callback_handler', ([], {}), '(**eval_params)\n', (4338, 4353), False, 'from llama_index.callbacks.langfuse import langfuse_callback_handler\n')] |
"""Google Generative AI Vector Store.
The GenAI Semantic Retriever API is a managed end-to-end service that allows
developers to create a corpus of documents to perform semantic search on
related passages given a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast
from llama_index.legacy.bridge.pydantic import ( # type: ignore
BaseModel,
Field,
PrivateAttr,
)
from llama_index.legacy.indices.service_context import ServiceContext
from llama_index.legacy.schema import BaseNode, RelatedNodeInfo, TextNode
from llama_index.legacy.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryResult,
)
if TYPE_CHECKING:
from google.auth import credentials
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_default_doc_id = "default-doc"
google_service_context = ServiceContext.from_defaults(
# Avoids instantiating OpenAI as the default model.
llm=None,
# Avoids instantiating HuggingFace as the default model.
embed_model=None,
)
"""Google GenerativeAI service context.
Use this to provide the correct service context for `GoogleVectorStore`.
See the docstring for `GoogleVectorStore` for a usage example.
"""
def set_google_config(
*,
api_endpoint: Optional[str] = None,
user_agent: Optional[str] = None,
page_size: Optional[int] = None,
auth_credentials: Optional["credentials.Credentials"] = None,
**kwargs: Any,
) -> None:
"""
Set the configuration for Google Generative AI API.
    Parameters are optional. Normally, the defaults should work fine.
If provided, they will override the default values in the Config class.
See the docstring in `genai_extension.py` for more details.
auth_credentials: Optional["credentials.Credentials"] = None,
        Use this to pass Google Auth credentials, for example when using a service account.
        Refer to the auth credentials documentation:
https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount.
Example:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
"/path/to/service.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
config_attrs = {
"api_endpoint": api_endpoint,
"user_agent": user_agent,
"page_size": page_size,
"auth_credentials": auth_credentials,
"testing": kwargs.get("testing", None),
}
attrs = {k: v for k, v in config_attrs.items() if v is not None}
config = genaix.Config(**attrs)
genaix.set_config(config)
class NoSuchCorpusException(Exception):
def __init__(self, *, corpus_id: str) -> None:
super().__init__(f"No such corpus {corpus_id} found")
class GoogleVectorStore(BasePydanticVectorStore):
"""Google GenerativeAI Vector Store.
Currently, it computes the embedding vectors on the server side.
Example:
google_vector_store = GoogleVectorStore.from_corpus(
corpus_id="my-corpus-id")
index = VectorStoreIndex.from_vector_store(
google_vector_store,
service_context=google_service_context)
Attributes:
corpus_id: The corpus ID that this vector store instance will read and
write to.
"""
# Semantic Retriever stores the document node's text as string and embeds
# the vectors on the server automatically.
stores_text: bool = True
is_embedding_query: bool = False
    # This is not Google's corpus name but an ID generated in the LlamaIndex
# world.
corpus_id: str = Field(frozen=True)
"""Corpus ID that this instance of the vector store is using."""
_client: Any = PrivateAttr()
def __init__(self, *, client: Any, **kwargs: Any):
"""Raw constructor.
Use the class method `from_corpus` or `create_corpus` instead.
Args:
client: The low-level retriever class from google.ai.generativelanguage.
"""
try:
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(**kwargs)
assert isinstance(client, genai.RetrieverServiceClient)
self._client = client
@classmethod
def from_corpus(cls, *, corpus_id: str) -> "GoogleVectorStore":
"""Create an instance that points to an existing corpus.
Args:
corpus_id: ID of an existing corpus on Google's server.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
NoSuchCorpusException if no such corpus is found.
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.from_corpus(corpus_id={corpus_id})")
client = genaix.build_semantic_retriever()
if genaix.get_corpus(corpus_id=corpus_id, client=client) is None:
raise NoSuchCorpusException(corpus_id=corpus_id)
return cls(corpus_id=corpus_id, client=client)
@classmethod
def create_corpus(
cls, *, corpus_id: Optional[str] = None, display_name: Optional[str] = None
) -> "GoogleVectorStore":
"""Create an instance that points to a newly created corpus.
Examples:
store = GoogleVectorStore.create_corpus()
print(f"Created corpus with ID: {store.corpus_id})
store = GoogleVectorStore.create_corpus(
display_name="My first corpus"
)
store = GoogleVectorStore.create_corpus(
corpus_id="my-corpus-1",
display_name="My first corpus"
)
Args:
corpus_id: ID of the new corpus to be created. If not provided,
Google server will provide one for you.
display_name: Title of the corpus. If not provided, Google server
will provide one for you.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
An exception if the corpus already exists or the user hits the
quota limit.
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(
f"\n\nGoogleVectorStore.create_corpus(new_corpus_id={corpus_id}, new_display_name={display_name})"
)
client = genaix.build_semantic_retriever()
new_corpus_id = corpus_id or str(uuid.uuid4())
new_corpus = genaix.create_corpus(
corpus_id=new_corpus_id, display_name=display_name, client=client
)
name = genaix.EntityName.from_str(new_corpus.name)
return cls(corpus_id=name.corpus_id, client=client)
@classmethod
def class_name(cls) -> str:
return "GoogleVectorStore"
@property
def client(self) -> Any:
return self._client
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""Add nodes with embedding to vector store.
If a node has a source node, the source node's ID will be used to create
a document. Otherwise, a default document for that corpus will be used
to house the node.
Furthermore, if the source node has a metadata field "file_name", it
will be used as the title of the document. If the source node has no
such field, Google server will assign a title to the document.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.add([
TextNode(
text="Hello, my darling",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
TextNode(
text="Goodbye, my baby",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
])
The above code will create one document with ID `doc-456` and title
`Title for doc-456`. This document will house both nodes.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.add(nodes={nodes})")
client = cast(genai.RetrieverServiceClient, self.client)
created_node_ids: List[str] = []
for nodeGroup in _group_nodes_by_source(nodes):
source = nodeGroup.source_node
document_id = source.node_id
document = genaix.get_document(
corpus_id=self.corpus_id, document_id=document_id, client=client
)
if not document:
genaix.create_document(
corpus_id=self.corpus_id,
display_name=source.metadata.get("file_name", None),
document_id=document_id,
metadata=source.metadata,
client=client,
)
created_chunks = genaix.batch_create_chunk(
corpus_id=self.corpus_id,
document_id=document_id,
texts=[node.get_content() for node in nodeGroup.nodes],
metadatas=[node.metadata for node in nodeGroup.nodes],
client=client,
)
created_node_ids.extend([chunk.name for chunk in created_chunks])
return created_node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes by ref_doc_id.
Both the underlying nodes and the document will be deleted from Google
server.
Args:
ref_doc_id: The document ID to be deleted.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.delete(ref_doc_id={ref_doc_id})")
client = cast(genai.RetrieverServiceClient, self.client)
genaix.delete_document(
corpus_id=self.corpus_id, document_id=ref_doc_id, client=client
)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.query(
query=VectorStoreQuery(
query_str="What is the meaning of life?",
# Only nodes with this author.
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="author",
value="Arthur Schopenhauer",
)
]
),
# Only from these docs. If not provided,
# the entire corpus is searched.
doc_ids=["doc-456"],
similarity_top_k=3,
)
)
Args:
query: See `llama_index.vector_stores.types.VectorStoreQuery`.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.query(query={query})")
query_str = query.query_str
if query_str is None:
raise ValueError("VectorStoreQuery.query_str should not be None.")
client = cast(genai.RetrieverServiceClient, self.client)
relevant_chunks: List[genai.RelevantChunk] = []
if query.doc_ids is None:
# The chunks from query_corpus should be sorted in reverse order by
            # relevance score.
relevant_chunks = genaix.query_corpus(
corpus_id=self.corpus_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
else:
for doc_id in query.doc_ids:
relevant_chunks.extend(
genaix.query_document(
corpus_id=self.corpus_id,
document_id=doc_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
)
        # Make sure the chunks are sorted in descending order of relevance
        # score, even across multiple documents.
relevant_chunks.sort(key=lambda c: c.chunk_relevance_score, reverse=True)
return VectorStoreQueryResult(
nodes=[
TextNode(
text=chunk.chunk.data.string_value,
id_=_extract_chunk_id(chunk.chunk.name),
)
for chunk in relevant_chunks
],
ids=[_extract_chunk_id(chunk.chunk.name) for chunk in relevant_chunks],
similarities=[chunk.chunk_relevance_score for chunk in relevant_chunks],
)
def _extract_chunk_id(entity_name: str) -> str:
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
id = genaix.EntityName.from_str(entity_name).chunk_id
assert id is not None
return id
class _NodeGroup(BaseModel):
"""Every node in nodes have the same source node."""
source_node: RelatedNodeInfo
nodes: List[BaseNode]
def _group_nodes_by_source(nodes: Sequence[BaseNode]) -> List[_NodeGroup]:
"""Returns a list of lists of nodes where each list has all the nodes
from the same document.
"""
groups: Dict[str, _NodeGroup] = {}
for node in nodes:
source_node: RelatedNodeInfo
if isinstance(node.source_node, RelatedNodeInfo):
source_node = node.source_node
else:
source_node = RelatedNodeInfo(node_id=_default_doc_id)
if source_node.node_id not in groups:
groups[source_node.node_id] = _NodeGroup(source_node=source_node, nodes=[])
groups[source_node.node_id].nodes.append(node)
return list(groups.values())
def _convert_filter(fs: Optional[MetadataFilters]) -> Dict[str, Any]:
if fs is None:
return {}
assert isinstance(fs, MetadataFilters)
return {f.key: f.value for f in fs.filters}
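# For example (added commentary): a MetadataFilters holding a single
# ExactMatchFilter(key="author", value="Arthur Schopenhauer") is flattened by
# `_convert_filter` into {"author": "Arthur Schopenhauer"} before being passed
# to the Semantic Retriever query helpers above.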
| [
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.create_corpus",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_document",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_semantic_retriever",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.Config",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.delete_document",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.set_config",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.indices.service_context.ServiceContext.from_defaults",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_corpus",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.EntityName.from_str",
"llama_index.legacy.schema.RelatedNodeInfo"
] | [((888, 915), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (905, 915), False, 'import logging\n'), ((1081, 1137), 'llama_index.legacy.indices.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'None', 'embed_model': 'None'}), '(llm=None, embed_model=None)\n', (1109, 1137), False, 'from llama_index.legacy.indices.service_context import ServiceContext\n'), ((3165, 3187), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.Config', 'genaix.Config', ([], {}), '(**attrs)\n', (3178, 3187), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((3192, 3217), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.set_config', 'genaix.set_config', (['config'], {}), '(config)\n', (3209, 3217), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4232, 4250), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4237, 4250), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((4340, 4353), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (4351, 4353), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((5609, 5642), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (5640, 5642), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7309, 7342), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (7340, 7342), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7419, 7510), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.create_corpus', 'genaix.create_corpus', ([], {'corpus_id': 'new_corpus_id', 'display_name': 'display_name', 'client': 'client'}), '(corpus_id=new_corpus_id, display_name=display_name,\n client=client)\n', (7439, 7510), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7544, 7587), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['new_corpus.name'], {}), '(new_corpus.name)\n', (7570, 7587), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((9679, 9726), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (9683, 9726), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((11450, 11497), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (11454, 11497), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((11506, 11597), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.delete_document', 'genaix.delete_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'ref_doc_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=ref_doc_id,\n client=client)\n', (11528, 11597), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((13116, 13163), 'typing.cast', 
'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (13120, 13163), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((15004, 15043), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['entity_name'], {}), '(entity_name)\n', (15030, 15043), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((5654, 5707), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_corpus', 'genaix.get_corpus', ([], {'corpus_id': 'corpus_id', 'client': 'client'}), '(corpus_id=corpus_id, client=client)\n', (5671, 5707), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((9932, 10021), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_document', 'genaix.get_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'document_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=document_id,\n client=client)\n', (9951, 10021), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((15668, 15708), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '_default_doc_id'}), '(node_id=_default_doc_id)\n', (15683, 15708), False, 'from llama_index.legacy.schema import BaseNode, RelatedNodeInfo, TextNode\n'), ((7384, 7396), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7394, 7396), False, 'import uuid\n')] |
import logging
from dataclasses import dataclass
from typing import Any, List, Optional, cast
import llama_index.legacy
from llama_index.legacy.bridge.pydantic import BaseModel
from llama_index.legacy.callbacks.base import CallbackManager
from llama_index.legacy.core.embeddings.base import BaseEmbedding
from llama_index.legacy.indices.prompt_helper import PromptHelper
from llama_index.legacy.llm_predictor import LLMPredictor
from llama_index.legacy.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.llms.utils import LLMType, resolve_llm
from llama_index.legacy.logger import LlamaLogger
from llama_index.legacy.node_parser.interface import NodeParser, TextSplitter
from llama_index.legacy.node_parser.text.sentence import (
DEFAULT_CHUNK_SIZE,
SENTENCE_CHUNK_OVERLAP,
SentenceSplitter,
)
from llama_index.legacy.prompts.base import BasePromptTemplate
from llama_index.legacy.schema import TransformComponent
from llama_index.legacy.types import PydanticProgramMode
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: int = DEFAULT_CHUNK_SIZE,
chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SentenceSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager or CallbackManager(),
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
class ServiceContextData(BaseModel):
llm: dict
llm_predictor: dict
prompt_helper: dict
embed_model: dict
transformations: List[dict]
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
transformations: List[TransformComponent]
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Any] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# pydantic program mode (used if output_cls is specified)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.legacy.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
system_prompt (Optional[str]): System-wide prompt to be prepended
to all input prompts, used to guide system "decision making"
query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
passed-in input queries.
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
from llama_index.legacy.embeddings.utils import EmbedType, resolve_embed_model
embed_model = cast(EmbedType, embed_model)
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.legacy.global_service_context is not None:
return cls.from_service_context(
llama_index.legacy.global_service_context,
llm=llm,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
text_splitter=text_splitter,
llama_logger=llama_logger,
callback_manager=callback_manager,
context_window=context_window,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
chunk_overlap=chunk_overlap,
num_output=num_output,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
transformations=transformations,
)
callback_manager = callback_manager or CallbackManager([])
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm.system_prompt = llm.system_prompt or system_prompt
llm.query_wrapper_prompt = llm.query_wrapper_prompt or query_wrapper_prompt
llm.pydantic_program_mode = (
llm.pydantic_program_mode or pydantic_program_mode
)
if llm_predictor is not None:
print("LLMPredictor is deprecated, please use LLM instead.")
llm_predictor = llm_predictor or LLMPredictor(
llm=llm, pydantic_program_mode=pydantic_program_mode
)
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# NOTE: embed model should be a transformation, but the way the service
# context works, we can't put in there yet.
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or [node_parser]
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Any] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
from llama_index.legacy.embeddings.utils import EmbedType, resolve_embed_model
embed_model = cast(EmbedType, embed_model)
if chunk_size_limit is not None and chunk_size is None:
            logger.warning(
                "chunk_size_limit is deprecated, please specify chunk_size instead"
            )
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# default to using the embed model passed from the service context
if embed_model == "default":
embed_model = service_context.embed_model
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or service_context.prompt_helper
if context_window is not None or num_output is not None:
prompt_helper = _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
transformations = transformations or []
node_parser_found = False
for transform in service_context.transformations:
if isinstance(transform, NodeParser):
node_parser_found = True
node_parser = transform
break
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
if not node_parser_found:
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or service_context.transformations
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
return self.llm_predictor.llm
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
for transform in self.transformations:
if isinstance(transform, NodeParser):
return transform
raise ValueError("No node parser found.")
def to_dict(self) -> dict:
"""Convert service context to dict."""
llm_dict = self.llm_predictor.llm.to_dict()
llm_predictor_dict = self.llm_predictor.to_dict()
embed_model_dict = self.embed_model.to_dict()
prompt_helper_dict = self.prompt_helper.to_dict()
        transform_list_dict = [x.to_dict() for x in self.transformations]
        return ServiceContextData(
            llm=llm_dict,
            llm_predictor=llm_predictor_dict,
            prompt_helper=prompt_helper_dict,
            embed_model=embed_model_dict,
            transformations=transform_list_dict,
).dict()
@classmethod
def from_dict(cls, data: dict) -> "ServiceContext":
from llama_index.legacy.embeddings.loading import load_embed_model
from llama_index.legacy.extractors.loading import load_extractor
from llama_index.legacy.llm_predictor.loading import load_predictor
from llama_index.legacy.node_parser.loading import load_parser
service_context_data = ServiceContextData.parse_obj(data)
llm_predictor = load_predictor(service_context_data.llm_predictor)
embed_model = load_embed_model(service_context_data.embed_model)
prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)
transformations: List[TransformComponent] = []
for transform in service_context_data.transformations:
try:
transformations.append(load_parser(transform))
except ValueError:
transformations.append(load_extractor(transform))
return cls.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
transformations=transformations,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.legacy.global_service_context = service_context
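# Illustrative usage sketch (editor addition, not part of the original module).
# It shows the intended flow: build a context, install it globally so later
# from_defaults() calls inherit it, and round-trip it through to_dict()/from_dict().
# Calling it assumes the default LLM/embedding models can be resolved (e.g. an
# OpenAI API key in the environment); the chunk sizes are arbitrary examples.
def _example_service_context_usage() -> "ServiceContext":
    ctx = ServiceContext.from_defaults(chunk_size=512, chunk_overlap=20)
    set_global_service_context(ctx)
    # Serialization round-trip: useful for persisting settings alongside an index.
    return ServiceContext.from_dict(ctx.to_dict())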
| [
"llama_index.legacy.node_parser.loading.load_parser",
"llama_index.legacy.callbacks.base.CallbackManager",
"llama_index.legacy.llm_predictor.LLMPredictor",
"llama_index.legacy.embeddings.loading.load_embed_model",
"llama_index.legacy.logger.LlamaLogger",
"llama_index.legacy.llms.utils.resolve_llm",
"llama_index.legacy.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.legacy.indices.prompt_helper.PromptHelper.from_dict",
"llama_index.legacy.embeddings.utils.resolve_embed_model",
"llama_index.legacy.extractors.loading.load_extractor",
"llama_index.legacy.llm_predictor.loading.load_predictor"
] | [((1067, 1094), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1084, 1094), False, 'import logging\n'), ((1869, 1926), 'llama_index.legacy.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1899, 1926), False, 'from llama_index.legacy.indices.prompt_helper import PromptHelper\n'), ((5247, 5275), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (5251, 5275), False, 'from typing import Any, List, Optional, cast\n'), ((7708, 7740), 'llama_index.legacy.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7727, 7740), False, 'from llama_index.legacy.embeddings.utils import EmbedType, resolve_embed_model\n'), ((10159, 10187), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (10163, 10187), False, 'from typing import Any, List, Optional, cast\n'), ((11403, 11435), 'llama_index.legacy.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (11422, 11435), False, 'from llama_index.legacy.embeddings.utils import EmbedType, resolve_embed_model\n'), ((14605, 14655), 'llama_index.legacy.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14619, 14655), False, 'from llama_index.legacy.llm_predictor.loading import load_predictor\n'), ((14679, 14729), 'llama_index.legacy.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14695, 14729), False, 'from llama_index.legacy.embeddings.loading import load_embed_model\n'), ((14755, 14813), 'llama_index.legacy.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (14777, 14813), False, 'from llama_index.legacy.indices.prompt_helper import PromptHelper\n'), ((6452, 6471), 'llama_index.legacy.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6467, 6471), False, 'from llama_index.legacy.callbacks.base import CallbackManager\n'), ((6639, 6655), 'llama_index.legacy.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6650, 6655), False, 'from llama_index.legacy.llms.utils import LLMType, resolve_llm\n'), ((7087, 7153), 'llama_index.legacy.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (7099, 7153), False, 'from llama_index.legacy.llm_predictor import LLMPredictor\n'), ((8616, 8629), 'llama_index.legacy.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8627, 8629), False, 'from llama_index.legacy.logger import LlamaLogger\n'), ((10698, 10714), 'llama_index.legacy.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (10709, 10714), False, 'from llama_index.legacy.llms.utils import LLMType, resolve_llm\n'), ((10743, 10764), 'llama_index.legacy.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10755, 10764), False, 'from llama_index.legacy.llm_predictor import LLMPredictor\n'), ((1468, 1485), 'llama_index.legacy.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1483, 1485), False, 'from llama_index.legacy.callbacks.base import 
CallbackManager\n'), ((14989, 15011), 'llama_index.legacy.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (15000, 15011), False, 'from llama_index.legacy.node_parser.loading import load_parser\n'), ((15083, 15108), 'llama_index.legacy.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (15097, 15108), False, 'from llama_index.legacy.extractors.loading import load_extractor\n')] |
"""Astra DB."""
from typing import Any, List, Optional
import llama_index.core
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class AstraDBReader(BaseReader):
"""Astra DB reader.
Retrieve documents from an Astra DB Instance.
Args:
        collection_name (str): The collection to use; it will be created if it does not already exist.
token (str): The Astra DB Application Token to use.
api_endpoint (str): The Astra DB JSON API endpoint for your database.
embedding_dimension (int): Length of the embedding vectors in use.
        namespace (Optional[str]): The namespace to use. Defaults to 'default_keyspace' if not provided.
client (Optional[Any]): Astra DB client to use. If not provided, one will be created.
"""
def __init__(
self,
*,
collection_name: str,
token: str,
api_endpoint: str,
embedding_dimension: int,
namespace: Optional[str] = None,
client: Optional[Any] = None,
) -> None:
"""Initialize with parameters."""
import_err_msg = (
"`astrapy` package not found, please run `pip install --upgrade astrapy`"
)
# Try to import astrapy for use
try:
from astrapy.db import AstraDB
except ImportError:
raise ImportError(import_err_msg)
if client is not None:
self._client = client.copy()
self._client.set_caller(
caller_name=getattr(llama_index, "__name__", "llama_index"),
caller_version=getattr(llama_index.core, "__version__", None),
)
else:
# Build the Astra DB object
self._client = AstraDB(
api_endpoint=api_endpoint,
token=token,
namespace=namespace,
caller_name=getattr(llama_index, "__name__", "llama_index"),
caller_version=getattr(llama_index.core, "__version__", None),
)
self._collection = self._client.create_collection(
collection_name=collection_name, dimension=embedding_dimension
)
def load_data(self, vector: List[float], limit: int = 10, **kwargs: Any) -> Any:
"""Load data from Astra DB.
Args:
            vector (List[float]): The query embedding vector.
limit (int): Number of results to return.
kwargs (Any): Additional arguments to pass to the Astra DB query.
Returns:
List[Document]: A list of documents.
"""
results = self._collection.vector_find(vector, limit=limit, **kwargs)
documents: List[Document] = []
for result in results:
document = Document(
doc_id=result["_id"],
text=result["content"],
embedding=result["$vector"],
)
documents.append(document)
return documents
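# Illustrative usage sketch (editor addition). The endpoint, token, and the
# 1536-dimensional query vector below are placeholders; supply values from your
# own Astra DB instance and embedding model before running.
if __name__ == "__main__":
    reader = AstraDBReader(
        collection_name="example_docs",
        token="AstraCS:<application-token>",  # placeholder
        api_endpoint="https://<db-id>-<region>.apps.astra.datastax.com",  # placeholder
        embedding_dimension=1536,
    )
    query_vector = [0.0] * 1536  # normally produced by an embedding model
    docs = reader.load_data(vector=query_vector, limit=5)
    print(f"Retrieved {len(docs)} documents")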
| [
"llama_index.core.schema.Document"
] | [((2732, 2820), 'llama_index.core.schema.Document', 'Document', ([], {'doc_id': "result['_id']", 'text': "result['content']", 'embedding': "result['$vector']"}), "(doc_id=result['_id'], text=result['content'], embedding=result[\n '$vector'])\n", (2740, 2820), False, 'from llama_index.core.schema import Document\n')] |
import os
from django.conf import settings
from django.http import JsonResponse
from django.views import View
import llama_index
from llama_index import (StorageContext,
load_index_from_storage,
ServiceContext,
set_global_service_context,
get_response_synthesizer)
from llama_index.llms import OpenAI
from llama_index.retrievers import VectorIndexRetriever,SummaryIndexLLMRetriever
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.indices.postprocessor import SimilarityPostprocessor
from llama_index.callbacks import CallbackManager, WandbCallbackHandler
from llama_index import set_global_handler,global_handler
from llama_index.prompts import PromptTemplate
from IPython.display import Markdown, display
llama_index.set_global_handler("simple")
# define LLM
llm = OpenAI(model="gpt-3.5-turbo", temperature=0, max_tokens=200, api_key=os.getenv("OPENAI_API_KEY"))
# configure service context
service_context = ServiceContext.from_defaults(llm=llm)
set_global_service_context(service_context)
# define prompt viewing function
def display_prompt_dict(prompts_dict):
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>"
display(Markdown(text_md))
print(p.get_template())
display(Markdown("<br><br>"))
index_file_path = os.path.join('../indexed_documents')
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir=index_file_path)
# load index
index = load_index_from_storage(storage_context)
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=2,
)
nodes = retriever.retrieve("国有资产管理的问题有哪些?")  # i.e. "What are the problems in state-owned asset management?"
# configure response synthesizer
response_synthesizer = get_response_synthesizer()
# assemble query engine
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
node_postprocessors=[
SimilarityPostprocessor(similarity_cutoff=0.9)
]
)
new_summary_tmpl_str = (
"The necessary materials and information are provided below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Utilizing the provided materials and your knowledge, "
"compose a detailed report.\n"
"Task: {query_str}\n"
"Answer: "
)
new_summary_tmpl = PromptTemplate(new_summary_tmpl_str)
query_engine.update_prompts(
{"response_synthesizer:text_qa_template": new_summary_tmpl}
)
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
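# Illustrative addition (not in the original script): run a query through the
# customized engine. The question is a placeholder; the global "simple" handler
# set above will also print the exact prompt sent to the LLM.
response = query_engine.query("What are the main problems in state-owned asset management?")
display(Markdown(str(response)))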
| [
"llama_index.get_response_synthesizer",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.prompts.PromptTemplate",
"llama_index.set_global_handler",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.load_index_from_storage",
"llama_index.indices.postprocessor.SimilarityPostprocessor",
"llama_index.set_global_service_context"
] | [((837, 877), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (867, 877), False, 'import llama_index\n'), ((1043, 1080), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (1071, 1080), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context, get_response_synthesizer\n'), ((1081, 1124), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (1107, 1124), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context, get_response_synthesizer\n'), ((1423, 1459), 'os.path.join', 'os.path.join', (['"""../indexed_documents"""'], {}), "('../indexed_documents')\n", (1435, 1459), False, 'import os\n'), ((1504, 1561), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'index_file_path'}), '(persist_dir=index_file_path)\n', (1532, 1561), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context, get_response_synthesizer\n'), ((1583, 1623), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1606, 1623), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context, get_response_synthesizer\n'), ((1637, 1690), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(2)'}), '(index=index, similarity_top_k=2)\n', (1657, 1690), False, 'from llama_index.retrievers import VectorIndexRetriever, SummaryIndexLLMRetriever\n'), ((1804, 1830), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {}), '()\n', (1828, 1830), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context, get_response_synthesizer\n'), ((2388, 2424), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['new_summary_tmpl_str'], {}), '(new_summary_tmpl_str)\n', (2402, 2424), False, 'from llama_index.prompts import PromptTemplate\n'), ((967, 994), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (976, 994), False, 'import os\n'), ((1315, 1332), 'IPython.display.Markdown', 'Markdown', (['text_md'], {}), '(text_md)\n', (1323, 1332), False, 'from IPython.display import Markdown, display\n'), ((1382, 1402), 'IPython.display.Markdown', 'Markdown', (['"""<br><br>"""'], {}), "('<br><br>')\n", (1390, 1402), False, 'from IPython.display import Markdown, display\n'), ((1999, 2045), 'llama_index.indices.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': '(0.9)'}), '(similarity_cutoff=0.9)\n', (2022, 2045), False, 'from llama_index.indices.postprocessor import SimilarityPostprocessor\n')] |
from unittest.mock import MagicMock, patch
import pytest
from llama_index.core.response.schema import Response
from llama_index.schema import Document
try:
import google.ai.generativelanguage as genai
has_google = True
except ImportError:
has_google = False
from llama_index.indices.managed.google.generativeai import (
GoogleIndex,
set_google_config,
)
SKIP_TEST_REASON = "Google GenerativeAI is not installed"
if has_google:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
set_google_config(
api_endpoint="No-such-endpoint-to-prevent-hitting-real-backend",
testing=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.auth.credentials.Credentials")
def test_set_google_config(mock_credentials: MagicMock) -> None:
set_google_config(auth_credentials=mock_credentials)
config = genaix.get_config()
assert config.auth_credentials == mock_credentials
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_from_corpus(mock_get_corpus: MagicMock) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
# Act
store = GoogleIndex.from_corpus(corpus_id="123")
# Assert
assert store.corpus_id == "123"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_corpus")
def test_create_corpus(mock_create_corpus: MagicMock) -> None:
def fake_create_corpus(request: genai.CreateCorpusRequest) -> genai.Corpus:
return request.corpus
# Arrange
mock_create_corpus.side_effect = fake_create_corpus
# Act
store = GoogleIndex.create_corpus(display_name="My first corpus")
# Assert
assert len(store.corpus_id) > 0
assert mock_create_corpus.call_count == 1
request = mock_create_corpus.call_args.args[0]
assert request.corpus.name == f"corpora/{store.corpus_id}"
assert request.corpus.display_name == "My first corpus"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_corpus")
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_document")
@patch("google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_document")
def test_from_documents(
mock_get_document: MagicMock,
mock_batch_create_chunk: MagicMock,
mock_create_document: MagicMock,
mock_create_corpus: MagicMock,
) -> None:
from google.api_core import exceptions as gapi_exception
def fake_create_corpus(request: genai.CreateCorpusRequest) -> genai.Corpus:
return request.corpus
# Arrange
mock_get_document.side_effect = gapi_exception.NotFound("")
mock_create_corpus.side_effect = fake_create_corpus
mock_create_document.return_value = genai.Document(name="corpora/123/documents/456")
mock_batch_create_chunk.side_effect = [
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/777"),
]
),
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/888"),
]
),
]
# Act
index = GoogleIndex.from_documents(
[
Document(text="Hello, my darling"),
Document(text="Goodbye, my baby"),
]
)
# Assert
assert mock_create_corpus.call_count == 1
create_corpus_request = mock_create_corpus.call_args.args[0]
assert create_corpus_request.corpus.name == f"corpora/{index.corpus_id}"
create_document_request = mock_create_document.call_args.args[0]
assert create_document_request.parent == f"corpora/{index.corpus_id}"
assert mock_batch_create_chunk.call_count == 2
first_batch_request = mock_batch_create_chunk.call_args_list[0].args[0]
assert (
first_batch_request.requests[0].chunk.data.string_value == "Hello, my darling"
)
second_batch_request = mock_batch_create_chunk.call_args_list[1].args[0]
assert (
second_batch_request.requests[0].chunk.data.string_value == "Goodbye, my baby"
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.query_corpus")
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_as_query_engine(
mock_get_corpus: MagicMock,
mock_generate_answer: MagicMock,
mock_query_corpus: MagicMock,
) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
mock_query_corpus.return_value = genai.QueryCorpusResponse(
relevant_chunks=[
genai.RelevantChunk(
chunk=genai.Chunk(
name="corpora/123/documents/456/chunks/789",
data=genai.ChunkData(string_value="It's 42"),
),
chunk_relevance_score=0.9,
)
]
)
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/777",
part_index=0,
)
),
),
genai.GroundingAttribution(
content=genai.Content(parts=[genai.Part(text="Or maybe not")]),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/888",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.9,
)
# Act
index = GoogleIndex.from_corpus(corpus_id="123")
query_engine = index.as_query_engine(
answer_style=genai.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE
)
response = query_engine.query("What is the meaning of life?")
# Assert
assert mock_query_corpus.call_count == 1
query_corpus_request = mock_query_corpus.call_args.args[0]
assert query_corpus_request.name == "corpora/123"
assert query_corpus_request.query == "What is the meaning of life?"
assert isinstance(response, Response)
assert response.response == "42"
assert mock_generate_answer.call_count == 1
generate_answer_request = mock_generate_answer.call_args.args[0]
assert (
generate_answer_request.contents[0].parts[0].text
== "What is the meaning of life?"
)
assert (
generate_answer_request.answer_style
== genai.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE
)
passages = generate_answer_request.inline_passages.passages
assert len(passages) == 1
passage = passages[0]
assert passage.content.parts[0].text == "It's 42"
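# Illustrative, non-test sketch (editor addition) of the API exercised by the
# mocks above. It is not collected by pytest and would need real Google
# Generative AI credentials plus the optional dependencies to run; treat it as
# documentation of the call flow rather than a definitive recipe.
def _example_usage() -> None:
    index = GoogleIndex.from_documents(
        [Document(text="Hello, my darling"), Document(text="Goodbye, my baby")]
    )
    query_engine = index.as_query_engine(
        answer_style=genai.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE
    )
    print(query_engine.query("What is the meaning of life?"))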
| [
"llama_index.schema.Document",
"llama_index.indices.managed.google.generativeai.GoogleIndex.from_corpus",
"llama_index.indices.managed.google.generativeai.set_google_config",
"llama_index.vector_stores.google.generativeai.genai_extension.get_config",
"llama_index.indices.managed.google.generativeai.GoogleIndex.create_corpus"
] | [((665, 724), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (683, 724), False, 'import pytest\n'), ((726, 770), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credentials.Credentials')\n", (731, 770), False, 'from unittest.mock import MagicMock, patch\n'), ((984, 1043), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1002, 1043), False, 'import pytest\n'), ((1045, 1116), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (1050, 1116), False, 'from unittest.mock import MagicMock, patch\n'), ((1374, 1433), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1392, 1433), False, 'import pytest\n'), ((1435, 1509), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_corpus')\n", (1440, 1509), False, 'from unittest.mock import MagicMock, patch\n'), ((2109, 2168), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (2127, 2168), False, 'import pytest\n'), ((2170, 2244), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_corpus')\n", (2175, 2244), False, 'from unittest.mock import MagicMock, patch\n'), ((2246, 2322), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_document')\n", (2251, 2322), False, 'from unittest.mock import MagicMock, patch\n'), ((2324, 2409), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks'\n )\n", (2329, 2409), False, 'from unittest.mock import MagicMock, patch\n'), ((2406, 2479), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_document')\n", (2411, 2479), False, 'from unittest.mock import MagicMock, patch\n'), ((4370, 4429), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (4388, 4429), False, 'import pytest\n'), ((4431, 4504), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.query_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.query_corpus')\n", (4436, 4504), False, 'from unittest.mock import MagicMock, patch\n'), ((4506, 4583), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (4511, 4583), False, 'from unittest.mock import MagicMock, patch\n'), ((4585, 4656), 'unittest.mock.patch', 'patch', 
(['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (4590, 4656), False, 'from unittest.mock import MagicMock, patch\n'), ((542, 643), 'llama_index.indices.managed.google.generativeai.set_google_config', 'set_google_config', ([], {'api_endpoint': '"""No-such-endpoint-to-prevent-hitting-real-backend"""', 'testing': '(True)'}), "(api_endpoint=\n 'No-such-endpoint-to-prevent-hitting-real-backend', testing=True)\n", (559, 643), False, 'from llama_index.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((840, 892), 'llama_index.indices.managed.google.generativeai.set_google_config', 'set_google_config', ([], {'auth_credentials': 'mock_credentials'}), '(auth_credentials=mock_credentials)\n', (857, 892), False, 'from llama_index.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((906, 925), 'llama_index.vector_stores.google.generativeai.genai_extension.get_config', 'genaix.get_config', ([], {}), '()\n', (923, 925), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((1224, 1256), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (1236, 1256), True, 'import google.ai.generativelanguage as genai\n'), ((1280, 1320), 'llama_index.indices.managed.google.generativeai.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (1303, 1320), False, 'from llama_index.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((1777, 1834), 'llama_index.indices.managed.google.generativeai.GoogleIndex.create_corpus', 'GoogleIndex.create_corpus', ([], {'display_name': '"""My first corpus"""'}), "(display_name='My first corpus')\n", (1802, 1834), False, 'from llama_index.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((2885, 2912), 'google.api_core.exceptions.NotFound', 'gapi_exception.NotFound', (['""""""'], {}), "('')\n", (2908, 2912), True, 'from google.api_core import exceptions as gapi_exception\n'), ((3009, 3057), 'google.ai.generativelanguage.Document', 'genai.Document', ([], {'name': '"""corpora/123/documents/456"""'}), "(name='corpora/123/documents/456')\n", (3023, 3057), True, 'import google.ai.generativelanguage as genai\n'), ((4846, 4878), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (4858, 4878), True, 'import google.ai.generativelanguage as genai\n'), ((6596, 6636), 'llama_index.indices.managed.google.generativeai.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (6619, 6636), False, 'from llama_index.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((3503, 3537), 'llama_index.schema.Document', 'Document', ([], {'text': '"""Hello, my darling"""'}), "(text='Hello, my darling')\n", (3511, 3537), False, 'from llama_index.schema import Document\n'), ((3551, 3584), 'llama_index.schema.Document', 'Document', ([], {'text': '"""Goodbye, my baby"""'}), "(text='Goodbye, my baby')\n", (3559, 3584), False, 'from llama_index.schema import Document\n'), ((3180, 3236), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': '"""corpora/123/documents/456/chunks/777"""'}), "(name='corpora/123/documents/456/chunks/777')\n", (3191, 3236), True, 'import google.ai.generativelanguage 
as genai\n'), ((3341, 3397), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': '"""corpora/123/documents/456/chunks/888"""'}), "(name='corpora/123/documents/456/chunks/888')\n", (3352, 3397), True, 'import google.ai.generativelanguage as genai\n'), ((5127, 5166), 'google.ai.generativelanguage.ChunkData', 'genai.ChunkData', ([], {'string_value': '"""It\'s 42"""'}), '(string_value="It\'s 42")\n', (5142, 5166), True, 'import google.ai.generativelanguage as genai\n'), ((5403, 5424), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (5413, 5424), True, 'import google.ai.generativelanguage as genai\n'), ((5747, 5861), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/777"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/777', part_index=0)\n", (5791, 5861), True, 'import google.ai.generativelanguage as genai\n'), ((6209, 6323), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/888"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/888', part_index=0)\n", (6253, 6323), True, 'import google.ai.generativelanguage as genai\n'), ((5583, 5623), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42"""'}), "(text='Meaning of life is 42')\n", (5593, 5623), True, 'import google.ai.generativelanguage as genai\n'), ((6075, 6106), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Or maybe not"""'}), "(text='Or maybe not')\n", (6085, 6106), True, 'import google.ai.generativelanguage as genai\n')] |
from unittest.mock import MagicMock, patch
import pytest
from llama_index.legacy.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.legacy.vector_stores.types import (
ExactMatchFilter,
MetadataFilters,
VectorStoreQuery,
)
try:
import google.ai.generativelanguage as genai
has_google = True
except ImportError:
has_google = False
from llama_index.legacy.vector_stores.google.generativeai import (
GoogleVectorStore,
set_google_config,
)
SKIP_TEST_REASON = "Google GenerativeAI is not installed"
if has_google:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
# Make sure the tests do not hit actual production servers.
set_google_config(
api_endpoint="No-such-endpoint-to-prevent-hitting-real-backend",
testing=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.auth.credentials.Credentials")
def test_set_google_config(mock_credentials: MagicMock) -> None:
set_google_config(auth_credentials=mock_credentials)
config = genaix.get_config()
assert config.auth_credentials == mock_credentials
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_corpus")
def test_create_corpus(mock_create_corpus: MagicMock) -> None:
def fake_create_corpus(request: genai.CreateCorpusRequest) -> genai.Corpus:
return request.corpus
# Arrange
mock_create_corpus.side_effect = fake_create_corpus
# Act
store = GoogleVectorStore.create_corpus(display_name="My first corpus")
# Assert
assert len(store.corpus_id) > 0
assert mock_create_corpus.call_count == 1
request = mock_create_corpus.call_args.args[0]
assert request.corpus.name == f"corpora/{store.corpus_id}"
assert request.corpus.display_name == "My first corpus"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_from_corpus(mock_get_corpus: MagicMock) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
# Act
store = GoogleVectorStore.from_corpus(corpus_id="123")
# Assert
assert store.corpus_id == "123"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
def test_class_name() -> None:
# Act
class_name = GoogleVectorStore.class_name()
# Assert
assert class_name == "GoogleVectorStore"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks")
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_document")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_document")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_add(
mock_get_corpus: MagicMock,
mock_get_document: MagicMock,
mock_create_document: MagicMock,
mock_batch_create_chunks: MagicMock,
) -> None:
from google.api_core import exceptions as gapi_exception
# Arrange
# We will use a max requests per batch to be 2.
# Then, we send 3 requests.
# We expect to have 2 batches where the last batch has only 1 request.
genaix._MAX_REQUEST_PER_CHUNK = 2
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
mock_get_document.side_effect = gapi_exception.NotFound("")
mock_create_document.return_value = genai.Document(name="corpora/123/documents/456")
mock_batch_create_chunks.side_effect = [
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/777"),
genai.Chunk(name="corpora/123/documents/456/chunks/888"),
]
),
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/999"),
]
),
]
# Act
store = GoogleVectorStore.from_corpus(corpus_id="123")
response = store.add(
[
TextNode(
text="Hello my baby",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="456",
metadata={"file_name": "Title for doc 456"},
)
},
metadata={"position": 100},
),
TextNode(
text="Hello my honey",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="456",
metadata={"file_name": "Title for doc 456"},
)
},
metadata={"position": 200},
),
TextNode(
text="Hello my ragtime gal",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="456",
metadata={"file_name": "Title for doc 456"},
)
},
metadata={"position": 300},
),
]
)
# Assert
assert response == [
"corpora/123/documents/456/chunks/777",
"corpora/123/documents/456/chunks/888",
"corpora/123/documents/456/chunks/999",
]
create_document_request = mock_create_document.call_args.args[0]
assert create_document_request == genai.CreateDocumentRequest(
parent="corpora/123",
document=genai.Document(
name="corpora/123/documents/456",
display_name="Title for doc 456",
custom_metadata=[
genai.CustomMetadata(
key="file_name",
string_value="Title for doc 456",
),
],
),
)
assert mock_batch_create_chunks.call_count == 2
mock_batch_create_chunks_calls = mock_batch_create_chunks.call_args_list
first_batch_create_chunks_request = mock_batch_create_chunks_calls[0].args[0]
assert first_batch_create_chunks_request == genai.BatchCreateChunksRequest(
parent="corpora/123/documents/456",
requests=[
genai.CreateChunkRequest(
parent="corpora/123/documents/456",
chunk=genai.Chunk(
data=genai.ChunkData(string_value="Hello my baby"),
custom_metadata=[
genai.CustomMetadata(
key="position",
numeric_value=100,
),
],
),
),
genai.CreateChunkRequest(
parent="corpora/123/documents/456",
chunk=genai.Chunk(
data=genai.ChunkData(string_value="Hello my honey"),
custom_metadata=[
genai.CustomMetadata(
key="position",
numeric_value=200,
),
],
),
),
],
)
second_batch_create_chunks_request = mock_batch_create_chunks_calls[1].args[0]
assert second_batch_create_chunks_request == genai.BatchCreateChunksRequest(
parent="corpora/123/documents/456",
requests=[
genai.CreateChunkRequest(
parent="corpora/123/documents/456",
chunk=genai.Chunk(
data=genai.ChunkData(string_value="Hello my ragtime gal"),
custom_metadata=[
genai.CustomMetadata(
key="position",
numeric_value=300,
),
],
),
),
],
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.delete_document")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_delete(
mock_get_corpus: MagicMock,
mock_delete_document: MagicMock,
) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
# Act
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.delete(ref_doc_id="doc-456")
# Assert
delete_document_request = mock_delete_document.call_args.args[0]
assert delete_document_request == genai.DeleteDocumentRequest(
name="corpora/123/documents/doc-456",
force=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.query_corpus")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_query(
mock_get_corpus: MagicMock,
mock_query_corpus: MagicMock,
) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
mock_query_corpus.return_value = genai.QueryCorpusResponse(
relevant_chunks=[
genai.RelevantChunk(
chunk=genai.Chunk(
name="corpora/123/documents/456/chunks/789",
data=genai.ChunkData(string_value="42"),
),
chunk_relevance_score=0.9,
)
]
)
# Act
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.query(
query=VectorStoreQuery(
query_str="What is the meaning of life?",
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="author",
value="Arthur Schopenhauer",
)
]
),
similarity_top_k=1,
)
)
# Assert
assert mock_query_corpus.call_count == 1
query_corpus_request = mock_query_corpus.call_args.args[0]
assert query_corpus_request == genai.QueryCorpusRequest(
name="corpora/123",
query="What is the meaning of life?",
metadata_filters=[
genai.MetadataFilter(
key="author",
conditions=[
genai.Condition(
operation=genai.Condition.Operator.EQUAL,
string_value="Arthur Schopenhauer",
)
],
)
],
results_count=1,
)
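# Illustrative, non-test sketch (editor addition) mirroring the calls mocked in
# the tests above. Running it requires real Google Generative AI credentials;
# it only documents the intended call pattern.
def _example_usage() -> None:
    store = GoogleVectorStore.create_corpus(display_name="My first corpus")
    store.add(
        [
            TextNode(
                text="Hello my baby",
                relationships={
                    NodeRelationship.SOURCE: RelatedNodeInfo(
                        node_id="doc-1", metadata={"file_name": "Greetings"}
                    )
                },
            )
        ]
    )
    result = store.query(
        query=VectorStoreQuery(query_str="Who is being greeted?", similarity_top_k=1)
    )
    print(result)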
| [
"llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.create_corpus",
"llama_index.legacy.vector_stores.types.ExactMatchFilter",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config",
"llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.from_corpus",
"llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.class_name",
"llama_index.legacy.vector_stores.google.generativeai.set_google_config",
"llama_index.legacy.schema.RelatedNodeInfo"
] | [((855, 914), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (873, 914), False, 'import pytest\n'), ((916, 960), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credentials.Credentials')\n", (921, 960), False, 'from unittest.mock import MagicMock, patch\n'), ((1174, 1233), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1192, 1233), False, 'import pytest\n'), ((1235, 1309), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_corpus')\n", (1240, 1309), False, 'from unittest.mock import MagicMock, patch\n'), ((1915, 1974), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1933, 1974), False, 'import pytest\n'), ((1976, 2047), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (1981, 2047), False, 'from unittest.mock import MagicMock, patch\n'), ((2311, 2370), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (2329, 2370), False, 'import pytest\n'), ((2522, 2581), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (2540, 2581), False, 'import pytest\n'), ((2583, 2668), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks'\n )\n", (2588, 2668), False, 'from unittest.mock import MagicMock, patch\n'), ((2665, 2741), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_document')\n", (2670, 2741), False, 'from unittest.mock import MagicMock, patch\n'), ((2743, 2816), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_document')\n", (2748, 2816), False, 'from unittest.mock import MagicMock, patch\n'), ((2818, 2889), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (2823, 2889), False, 'from unittest.mock import MagicMock, patch\n'), ((7904, 7963), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (7922, 7963), False, 'import pytest\n'), ((7965, 8041), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.delete_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.delete_document')\n", (7970, 8041), False, 'from unittest.mock import MagicMock, patch\n'), ((8043, 8114), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), 
"('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (8048, 8114), False, 'from unittest.mock import MagicMock, patch\n'), ((8628, 8687), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (8646, 8687), False, 'import pytest\n'), ((8689, 8762), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.query_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.query_corpus')\n", (8694, 8762), False, 'from unittest.mock import MagicMock, patch\n'), ((8764, 8835), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (8769, 8835), False, 'from unittest.mock import MagicMock, patch\n'), ((732, 833), 'llama_index.legacy.vector_stores.google.generativeai.set_google_config', 'set_google_config', ([], {'api_endpoint': '"""No-such-endpoint-to-prevent-hitting-real-backend"""', 'testing': '(True)'}), "(api_endpoint=\n 'No-such-endpoint-to-prevent-hitting-real-backend', testing=True)\n", (749, 833), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((1030, 1082), 'llama_index.legacy.vector_stores.google.generativeai.set_google_config', 'set_google_config', ([], {'auth_credentials': 'mock_credentials'}), '(auth_credentials=mock_credentials)\n', (1047, 1082), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((1096, 1115), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config', 'genaix.get_config', ([], {}), '()\n', (1113, 1115), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((1577, 1640), 'llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.create_corpus', 'GoogleVectorStore.create_corpus', ([], {'display_name': '"""My first corpus"""'}), "(display_name='My first corpus')\n", (1608, 1640), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((2155, 2187), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (2167, 2187), True, 'import google.ai.generativelanguage as genai\n'), ((2211, 2257), 'llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.from_corpus', 'GoogleVectorStore.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (2240, 2257), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((2429, 2459), 'llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.class_name', 'GoogleVectorStore.class_name', ([], {}), '()\n', (2457, 2459), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((3367, 3399), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (3379, 3399), True, 'import google.ai.generativelanguage as genai\n'), ((3436, 3463), 'google.api_core.exceptions.NotFound', 'gapi_exception.NotFound', (['""""""'], {}), "('')\n", (3459, 3463), True, 'from google.api_core import exceptions as gapi_exception\n'), ((3504, 3552), 'google.ai.generativelanguage.Document', 'genai.Document', ([], {'name': 
'"""corpora/123/documents/456"""'}), "(name='corpora/123/documents/456')\n", (3518, 3552), True, 'import google.ai.generativelanguage as genai\n'), ((4023, 4069), 'llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.from_corpus', 'GoogleVectorStore.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (4052, 4069), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((8261, 8293), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (8273, 8293), True, 'import google.ai.generativelanguage as genai\n'), ((8317, 8363), 'llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.from_corpus', 'GoogleVectorStore.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (8346, 8363), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((8978, 9010), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (8990, 9010), True, 'import google.ai.generativelanguage as genai\n'), ((9410, 9456), 'llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.from_corpus', 'GoogleVectorStore.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (9439, 9456), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((8524, 8601), 'google.ai.generativelanguage.DeleteDocumentRequest', 'genai.DeleteDocumentRequest', ([], {'name': '"""corpora/123/documents/doc-456"""', 'force': '(True)'}), "(name='corpora/123/documents/doc-456', force=True)\n", (8551, 8601), True, 'import google.ai.generativelanguage as genai\n'), ((3676, 3732), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': '"""corpora/123/documents/456/chunks/777"""'}), "(name='corpora/123/documents/456/chunks/777')\n", (3687, 3732), True, 'import google.ai.generativelanguage as genai\n'), ((3750, 3806), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': '"""corpora/123/documents/456/chunks/888"""'}), "(name='corpora/123/documents/456/chunks/888')\n", (3761, 3806), True, 'import google.ai.generativelanguage as genai\n'), ((3911, 3967), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': '"""corpora/123/documents/456/chunks/999"""'}), "(name='corpora/123/documents/456/chunks/999')\n", (3922, 3967), True, 'import google.ai.generativelanguage as genai\n'), ((4243, 4318), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '"""456"""', 'metadata': "{'file_name': 'Title for doc 456'}"}), "(node_id='456', metadata={'file_name': 'Title for doc 456'})\n", (4258, 4318), False, 'from llama_index.legacy.schema import NodeRelationship, RelatedNodeInfo, TextNode\n'), ((4606, 4681), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '"""456"""', 'metadata': "{'file_name': 'Title for doc 456'}"}), "(node_id='456', metadata={'file_name': 'Title for doc 456'})\n", (4621, 4681), False, 'from llama_index.legacy.schema import NodeRelationship, RelatedNodeInfo, TextNode\n'), ((4975, 5050), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '"""456"""', 'metadata': "{'file_name': 'Title for doc 456'}"}), "(node_id='456', metadata={'file_name': 'Title for doc 456'})\n", (4990, 5050), False, 'from llama_index.legacy.schema import NodeRelationship, 
RelatedNodeInfo, TextNode\n'), ((5743, 5814), 'google.ai.generativelanguage.CustomMetadata', 'genai.CustomMetadata', ([], {'key': '"""file_name"""', 'string_value': '"""Title for doc 456"""'}), "(key='file_name', string_value='Title for doc 456')\n", (5763, 5814), True, 'import google.ai.generativelanguage as genai\n'), ((9259, 9293), 'google.ai.generativelanguage.ChunkData', 'genai.ChunkData', ([], {'string_value': '"""42"""'}), "(string_value='42')\n", (9274, 9293), True, 'import google.ai.generativelanguage as genai\n'), ((9643, 9702), 'llama_index.legacy.vector_stores.types.ExactMatchFilter', 'ExactMatchFilter', ([], {'key': '"""author"""', 'value': '"""Arthur Schopenhauer"""'}), "(key='author', value='Arthur Schopenhauer')\n", (9659, 9702), False, 'from llama_index.legacy.vector_stores.types import ExactMatchFilter, MetadataFilters, VectorStoreQuery\n'), ((10252, 10350), 'google.ai.generativelanguage.Condition', 'genai.Condition', ([], {'operation': 'genai.Condition.Operator.EQUAL', 'string_value': '"""Arthur Schopenhauer"""'}), "(operation=genai.Condition.Operator.EQUAL, string_value=\n 'Arthur Schopenhauer')\n", (10267, 10350), True, 'import google.ai.generativelanguage as genai\n'), ((6413, 6458), 'google.ai.generativelanguage.ChunkData', 'genai.ChunkData', ([], {'string_value': '"""Hello my baby"""'}), "(string_value='Hello my baby')\n", (6428, 6458), True, 'import google.ai.generativelanguage as genai\n'), ((6869, 6915), 'google.ai.generativelanguage.ChunkData', 'genai.ChunkData', ([], {'string_value': '"""Hello my honey"""'}), "(string_value='Hello my honey')\n", (6884, 6915), True, 'import google.ai.generativelanguage as genai\n'), ((7571, 7623), 'google.ai.generativelanguage.ChunkData', 'genai.ChunkData', ([], {'string_value': '"""Hello my ragtime gal"""'}), "(string_value='Hello my ragtime gal')\n", (7586, 7623), True, 'import google.ai.generativelanguage as genai\n'), ((6522, 6577), 'google.ai.generativelanguage.CustomMetadata', 'genai.CustomMetadata', ([], {'key': '"""position"""', 'numeric_value': '(100)'}), "(key='position', numeric_value=100)\n", (6542, 6577), True, 'import google.ai.generativelanguage as genai\n'), ((6979, 7034), 'google.ai.generativelanguage.CustomMetadata', 'genai.CustomMetadata', ([], {'key': '"""position"""', 'numeric_value': '(200)'}), "(key='position', numeric_value=200)\n", (6999, 7034), True, 'import google.ai.generativelanguage as genai\n'), ((7687, 7742), 'google.ai.generativelanguage.CustomMetadata', 'genai.CustomMetadata', ([], {'key': '"""position"""', 'numeric_value': '(300)'}), "(key='position', numeric_value=300)\n", (7707, 7742), True, 'import google.ai.generativelanguage as genai\n')] |
import logging
from dataclasses import dataclass
from typing import List, Optional
import llama_index
from llama_index.bridge.pydantic import BaseModel
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.utils import EmbedType, resolve_embed_model
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.llm_predictor import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.llms.base import LLM
from llama_index.llms.utils import LLMType, resolve_llm
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser, TextSplitter
from llama_index.node_parser.text.sentence import (
DEFAULT_CHUNK_SIZE,
SENTENCE_CHUNK_OVERLAP,
SentenceSplitter,
)
from llama_index.prompts.base import BasePromptTemplate
from llama_index.schema import TransformComponent
from llama_index.types import PydanticProgramMode
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: int = DEFAULT_CHUNK_SIZE,
chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SentenceSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager or CallbackManager(),
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
class ServiceContextData(BaseModel):
llm: dict
llm_predictor: dict
prompt_helper: dict
embed_model: dict
transformations: List[dict]
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
transformations: List[TransformComponent]
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[EmbedType] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# pydantic program mode (used if output_cls is specified)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
system_prompt (Optional[str]): System-wide prompt to be prepended
to all input prompts, used to guide system "decision making"
query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
passed-in input queries.
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
text_splitter=text_splitter,
llama_logger=llama_logger,
callback_manager=callback_manager,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
)
callback_manager = callback_manager or CallbackManager([])
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = llm_predictor or LLMPredictor(
llm=llm, pydantic_program_mode=pydantic_program_mode
)
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# NOTE: embed model should be a transformation, but the way the service
        # context works, we can't put it in there yet.
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or [node_parser]
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[EmbedType] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
if chunk_size_limit is not None and chunk_size is None:
            logger.warning(
                "chunk_size_limit is deprecated, please specify chunk_size instead"
            )
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# default to using the embed model passed from the service context
if embed_model == "default":
embed_model = service_context.embed_model
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or service_context.prompt_helper
if context_window is not None or num_output is not None:
prompt_helper = _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
transformations = transformations or []
node_parser_found = False
for transform in service_context.transformations:
if isinstance(transform, NodeParser):
node_parser_found = True
node_parser = transform
break
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
if not node_parser_found:
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or service_context.transformations
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
if not isinstance(self.llm_predictor, LLMPredictor):
raise ValueError("llm_predictor must be an instance of LLMPredictor")
return self.llm_predictor.llm
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
for transform in self.transformations:
if isinstance(transform, NodeParser):
return transform
raise ValueError("No node parser found.")
def to_dict(self) -> dict:
"""Convert service context to dict."""
llm_dict = self.llm_predictor.llm.to_dict()
llm_predictor_dict = self.llm_predictor.to_dict()
embed_model_dict = self.embed_model.to_dict()
prompt_helper_dict = self.prompt_helper.to_dict()
tranform_list_dict = [x.to_dict() for x in self.transformations]
return ServiceContextData(
llm=llm_dict,
llm_predictor=llm_predictor_dict,
prompt_helper=prompt_helper_dict,
embed_model=embed_model_dict,
transformations=tranform_list_dict,
).dict()
@classmethod
def from_dict(cls, data: dict) -> "ServiceContext":
from llama_index.embeddings.loading import load_embed_model
from llama_index.extractors.loading import load_extractor
from llama_index.llm_predictor.loading import load_predictor
from llama_index.node_parser.loading import load_parser
service_context_data = ServiceContextData.parse_obj(data)
llm_predictor = load_predictor(service_context_data.llm_predictor)
embed_model = load_embed_model(service_context_data.embed_model)
prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)
transformations: List[TransformComponent] = []
for transform in service_context_data.transformations:
try:
transformations.append(load_parser(transform))
except ValueError:
transformations.append(load_extractor(transform))
return cls.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
transformations=transformations,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
| [
"llama_index.llm_predictor.loading.load_predictor",
"llama_index.embeddings.loading.load_embed_model",
"llama_index.logger.LlamaLogger",
"llama_index.llms.utils.resolve_llm",
"llama_index.node_parser.loading.load_parser",
"llama_index.callbacks.base.CallbackManager",
"llama_index.indices.prompt_helper.PromptHelper.from_dict",
"llama_index.extractors.loading.load_extractor",
"llama_index.llm_predictor.LLMPredictor",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.embeddings.utils.resolve_embed_model"
] | [((1019, 1046), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1036, 1046), False, 'import logging\n'), ((1821, 1878), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1851, 1878), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((6806, 6838), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (6825, 6838), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((10367, 10399), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (10386, 10399), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((13684, 13734), 'llama_index.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (13698, 13734), False, 'from llama_index.llm_predictor.loading import load_predictor\n'), ((13758, 13808), 'llama_index.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (13774, 13808), False, 'from llama_index.embeddings.loading import load_embed_model\n'), ((13834, 13892), 'llama_index.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (13856, 13892), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((5940, 5959), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (5955, 5959), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((6127, 6143), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6138, 6143), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((6185, 6251), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (6197, 6251), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((7714, 7727), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (7725, 7727), False, 'from llama_index.logger import LlamaLogger\n'), ((9662, 9678), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (9673, 9678), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((9707, 9728), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (9719, 9728), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((1420, 1437), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1435, 1437), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((14068, 14090), 'llama_index.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (14079, 14090), False, 'from llama_index.node_parser.loading import load_parser\n'), ((14162, 14187), 'llama_index.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (14176, 14187), False, 'from llama_index.extractors.loading import load_extractor\n')] |
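# A minimal usage sketch for the ServiceContext module above, assuming the legacy
# llama_index API it belongs to. The OpenAI model name, chunk size, and "local"
# embedding choice are illustrative assumptions, not values taken from the source.
from llama_index import ServiceContext, set_global_service_context
from llama_index.llms import OpenAI

service_context = ServiceContext.from_defaults(
    llm=OpenAI(model="gpt-3.5-turbo"),  # resolved through resolve_llm()
    embed_model="local",  # resolved through resolve_embed_model()
    chunk_size=512,  # forwarded to the default SentenceSplitter
)
# Any index or query class created afterwards without an explicit context falls back
# to this one via llama_index.global_service_context.
set_global_service_context(service_context)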
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import requests
from llama_index.core.download.utils import (
get_exports,
get_file_content,
initialize_directory,
rewrite_exports,
)
LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main"
LLAMA_HUB_PATH = "/llama_hub"
LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH
PATH_TYPE = Union[str, Path]
logger = logging.getLogger(__name__)
LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads"
class MODULE_TYPE(str, Enum):
LOADER = "loader"
TOOL = "tool"
LLAMAPACK = "llamapack"
DATASETS = "datasets"
def get_module_info(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_class: str,
refresh_cache: bool = False,
library_path: str = "library.json",
disable_library_cache: bool = False,
) -> Dict:
"""Get module info."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
local_library_path = f"{local_dir_path}/{library_path}"
module_id = None # e.g. `web/simple_web`
extra_files = [] # e.g. `web/simple_web/utils.py`
# Check cache first
if not refresh_cache and os.path.exists(local_library_path):
with open(local_library_path) as f:
library = json.load(f)
if module_class in library:
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# Fetch up-to-date library from remote repo if module_id not found
if module_id is None:
library_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{library_path}"
)
library = json.loads(library_raw_content)
if module_class not in library:
raise ValueError("Loader class name not found in library")
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# create cache dir if needed
local_library_dir = os.path.dirname(local_library_path)
if not disable_library_cache:
if not os.path.exists(local_library_dir):
os.makedirs(local_library_dir)
# Update cache
with open(local_library_path, "w") as f:
f.write(library_raw_content)
if module_id is None:
raise ValueError("Loader class name not found in library")
return {
"module_id": module_id,
"extra_files": extra_files,
}
def download_module_and_reqs(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_id: str,
extra_files: List[str],
refresh_cache: bool = False,
use_gpt_index_import: bool = False,
base_file_name: str = "base.py",
override_path: bool = False,
) -> None:
"""Load module."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
if override_path:
module_path = str(local_dir_path)
else:
module_path = f"{local_dir_path}/{module_id}"
if refresh_cache or not os.path.exists(module_path):
os.makedirs(module_path, exist_ok=True)
basepy_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{base_file_name}"
)
    if use_gpt_index_import:
        # NOTE: both replacements below are currently no-ops (the search and
        # replacement strings are identical), apparently left over from the
        # llama_index.core migration; the flag therefore does not change imports.
        basepy_raw_content = basepy_raw_content.replace(
            "import llama_index.core", "import llama_index.core"
        )
        basepy_raw_content = basepy_raw_content.replace(
            "from llama_index", "from llama_index"
        )
with open(f"{module_path}/{base_file_name}", "w") as f:
f.write(basepy_raw_content)
# Get content of extra files if there are any
# and write them under the loader directory
for extra_file in extra_files:
extra_file_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{extra_file}"
)
# If the extra file is an __init__.py file, we need to
# add the exports to the __init__.py file in the modules directory
if extra_file == "__init__.py":
loader_exports = get_exports(extra_file_raw_content)
existing_exports = []
init_file_path = local_dir_path / "__init__.py"
            # if the __init__.py file does not exist, we need to create it
mode = "a+" if not os.path.exists(init_file_path) else "r+"
with open(init_file_path, mode) as f:
f.write(f"from .{module_id} import {', '.join(loader_exports)}")
existing_exports = get_exports(f.read())
rewrite_exports(existing_exports + loader_exports, str(local_dir_path))
with open(f"{module_path}/{extra_file}", "w") as f:
f.write(extra_file_raw_content)
# install requirements
requirements_path = f"{local_dir_path}/requirements.txt"
if not os.path.exists(requirements_path):
# NOTE: need to check the status code
response_txt, status_code = get_file_content(
str(remote_dir_path), f"/{module_id}/requirements.txt"
)
if status_code == 200:
with open(requirements_path, "w") as f:
f.write(response_txt)
# Install dependencies if there are any and not already installed
if os.path.exists(requirements_path):
import pkg_resources
from pkg_resources import DistributionNotFound
try:
requirements = pkg_resources.parse_requirements(
Path(requirements_path).open()
)
pkg_resources.require([str(r) for r in requirements])
except DistributionNotFound:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", requirements_path]
)
def download_llama_module(
module_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_dir: Optional[str] = None,
custom_path: Optional[str] = None,
library_path: str = "library.json",
base_file_name: str = "base.py",
use_gpt_index_import: bool = False,
disable_library_cache: bool = False,
override_path: bool = False,
skip_load: bool = False,
) -> Any:
"""Download a module from LlamaHub.
Can be a loader, tool, pack, or more.
Args:
        module_class: The name of the llama module class you want to download,
such as `GmailOpenAIAgentPack`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_dir: Custom dir name to download loader into (under parent folder).
custom_path: Custom dirpath to download loader into.
library_path: File name of the library file.
        use_gpt_index_import: If true, rewrite imports in the downloaded loader
            files; after the llama_index.core migration this rewrite is
            effectively a no-op, so both settings leave llama_index as the
            base dependency.
NOTE: this is a temporary workaround while we fully migrate all usages
to llama_index.core.
is_dataset: whether or not downloading a LlamaDataset
Returns:
A Loader, A Pack, An Agent, or A Dataset
"""
# create directory / get path
dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir)
# fetch info from library.json file
module_info = get_module_info(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_class=module_class,
refresh_cache=refresh_cache,
library_path=library_path,
disable_library_cache=disable_library_cache,
)
module_id = module_info["module_id"]
extra_files = module_info["extra_files"]
# download the module, install requirements
download_module_and_reqs(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_id=module_id,
extra_files=extra_files,
refresh_cache=refresh_cache,
use_gpt_index_import=use_gpt_index_import,
base_file_name=base_file_name,
override_path=override_path,
)
if skip_load:
return None
# loads the module into memory
if override_path:
path = f"{dirpath}/{base_file_name}"
spec = util.spec_from_file_location("custom_module", location=path)
if spec is None:
raise ValueError(f"Could not find file: {path}.")
else:
path = f"{dirpath}/{module_id}/{base_file_name}"
spec = util.spec_from_file_location("custom_module", location=path)
if spec is None:
raise ValueError(f"Could not find file: {path}.")
module = util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return getattr(module, module_class)
def track_download(module_class: str, module_type: str) -> None:
"""Tracks number of downloads via Llamahub proxy.
Args:
module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`.
module_type: Can be "loader", "tool", "llamapack", or "datasets"
"""
try:
requests.post(
LLAMAHUB_ANALYTICS_PROXY_SERVER,
json={"type": module_type, "plugin": module_class},
)
except Exception as e:
logger.info(f"Error tracking downloads for {module_class} : {e}")
| [
"llama_index.core.download.utils.initialize_directory",
"llama_index.core.download.utils.get_exports"
] | [((574, 601), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (591, 601), False, 'import logging\n'), ((5497, 5530), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5511, 5530), False, 'import os\n'), ((7469, 7537), 'llama_index.core.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7489, 7537), False, 'from llama_index.core.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8867, 8894), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8888, 8894), False, 'from importlib import util\n'), ((1134, 1154), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1138, 1154), False, 'from pathlib import Path\n'), ((1371, 1405), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1385, 1405), False, 'import os\n'), ((1875, 1906), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1885, 1906), False, 'import json\n'), ((2200, 2235), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2215, 2235), False, 'import os\n'), ((3068, 3088), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3072, 3088), False, 'from pathlib import Path\n'), ((3284, 3323), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3295, 3323), False, 'import os\n'), ((5086, 5119), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5100, 5119), False, 'import os\n'), ((8475, 8535), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'path'}), "('custom_module', location=path)\n", (8503, 8535), False, 'from importlib import util\n'), ((8705, 8765), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'path'}), "('custom_module', location=path)\n", (8733, 8765), False, 'from importlib import util\n'), ((9317, 9419), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9330, 9419), False, 'import requests\n'), ((1473, 1485), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1482, 1485), False, 'import json\n'), ((3247, 3274), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3261, 3274), False, 'import os\n'), ((4332, 4367), 'llama_index.core.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4343, 4367), False, 'from llama_index.core.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2293, 2326), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2307, 2326), False, 'import os\n'), ((2344, 2374), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2355, 2374), False, 'import os\n'), ((5867, 5959), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n requirements_path])\n", 
(5888, 5959), False, 'import subprocess\n'), ((4567, 4597), 'os.path.exists', 'os.path.exists', (['init_file_path'], {}), '(init_file_path)\n', (4581, 4597), False, 'import os\n'), ((5707, 5730), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5711, 5730), False, 'from pathlib import Path\n')] |
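# Usage sketch for the downloader defined above. "GmailOpenAIAgentPack" is the class
# name the docstring itself uses as an example; treating it as a LlamaPack for the
# analytics call is an assumption, and a network connection to LlamaHub is required.
pack_cls = download_llama_module(
    "GmailOpenAIAgentPack",
    refresh_cache=True,  # bypass the cached library.json
    skip_load=False,  # import the downloaded module and return the class
)
track_download("GmailOpenAIAgentPack", MODULE_TYPE.LLAMAPACK)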
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import requests
from llama_index.core.download.utils import (
get_exports,
get_file_content,
initialize_directory,
rewrite_exports,
)
LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main"
LLAMA_HUB_PATH = "/llama_hub"
LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH
PATH_TYPE = Union[str, Path]
logger = logging.getLogger(__name__)
LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads"
class MODULE_TYPE(str, Enum):
LOADER = "loader"
TOOL = "tool"
LLAMAPACK = "llamapack"
DATASETS = "datasets"
def get_module_info(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_class: str,
refresh_cache: bool = False,
library_path: str = "library.json",
disable_library_cache: bool = False,
) -> Dict:
"""Get module info."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
local_library_path = f"{local_dir_path}/{library_path}"
module_id = None # e.g. `web/simple_web`
extra_files = [] # e.g. `web/simple_web/utils.py`
# Check cache first
if not refresh_cache and os.path.exists(local_library_path):
with open(local_library_path) as f:
library = json.load(f)
if module_class in library:
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# Fetch up-to-date library from remote repo if module_id not found
if module_id is None:
library_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{library_path}"
)
library = json.loads(library_raw_content)
if module_class not in library:
raise ValueError("Loader class name not found in library")
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# create cache dir if needed
local_library_dir = os.path.dirname(local_library_path)
if not disable_library_cache:
if not os.path.exists(local_library_dir):
os.makedirs(local_library_dir)
# Update cache
with open(local_library_path, "w") as f:
f.write(library_raw_content)
if module_id is None:
raise ValueError("Loader class name not found in library")
return {
"module_id": module_id,
"extra_files": extra_files,
}
def download_module_and_reqs(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_id: str,
extra_files: List[str],
refresh_cache: bool = False,
use_gpt_index_import: bool = False,
base_file_name: str = "base.py",
override_path: bool = False,
) -> None:
"""Load module."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
if override_path:
module_path = str(local_dir_path)
else:
module_path = f"{local_dir_path}/{module_id}"
if refresh_cache or not os.path.exists(module_path):
os.makedirs(module_path, exist_ok=True)
basepy_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{base_file_name}"
)
if use_gpt_index_import:
basepy_raw_content = basepy_raw_content.replace(
"import llama_index.core", "import llama_index.core"
)
basepy_raw_content = basepy_raw_content.replace(
"from llama_index", "from llama_index"
)
with open(f"{module_path}/{base_file_name}", "w") as f:
f.write(basepy_raw_content)
# Get content of extra files if there are any
# and write them under the loader directory
for extra_file in extra_files:
extra_file_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{extra_file}"
)
# If the extra file is an __init__.py file, we need to
# add the exports to the __init__.py file in the modules directory
if extra_file == "__init__.py":
loader_exports = get_exports(extra_file_raw_content)
existing_exports = []
init_file_path = local_dir_path / "__init__.py"
# if the __init__.py file do not exists, we need to create it
mode = "a+" if not os.path.exists(init_file_path) else "r+"
with open(init_file_path, mode) as f:
f.write(f"from .{module_id} import {', '.join(loader_exports)}")
existing_exports = get_exports(f.read())
rewrite_exports(existing_exports + loader_exports, str(local_dir_path))
with open(f"{module_path}/{extra_file}", "w") as f:
f.write(extra_file_raw_content)
# install requirements
requirements_path = f"{local_dir_path}/requirements.txt"
if not os.path.exists(requirements_path):
# NOTE: need to check the status code
response_txt, status_code = get_file_content(
str(remote_dir_path), f"/{module_id}/requirements.txt"
)
if status_code == 200:
with open(requirements_path, "w") as f:
f.write(response_txt)
# Install dependencies if there are any and not already installed
if os.path.exists(requirements_path):
import pkg_resources
from pkg_resources import DistributionNotFound
try:
requirements = pkg_resources.parse_requirements(
Path(requirements_path).open()
)
pkg_resources.require([str(r) for r in requirements])
except DistributionNotFound:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", requirements_path]
)
def download_llama_module(
module_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_dir: Optional[str] = None,
custom_path: Optional[str] = None,
library_path: str = "library.json",
base_file_name: str = "base.py",
use_gpt_index_import: bool = False,
disable_library_cache: bool = False,
override_path: bool = False,
skip_load: bool = False,
) -> Any:
"""Download a module from LlamaHub.
Can be a loader, tool, pack, or more.
Args:
loader_class: The name of the llama module class you want to download,
such as `GmailOpenAIAgentPack`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_dir: Custom dir name to download loader into (under parent folder).
custom_path: Custom dirpath to download loader into.
library_path: File name of the library file.
use_gpt_index_import: If true, the loader files will use
llama_index as the base dependency. By default (False),
the loader files use llama_index as the base dependency.
NOTE: this is a temporary workaround while we fully migrate all usages
to llama_index.core.
is_dataset: whether or not downloading a LlamaDataset
Returns:
A Loader, A Pack, An Agent, or A Dataset
"""
# create directory / get path
dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir)
# fetch info from library.json file
module_info = get_module_info(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_class=module_class,
refresh_cache=refresh_cache,
library_path=library_path,
disable_library_cache=disable_library_cache,
)
module_id = module_info["module_id"]
extra_files = module_info["extra_files"]
# download the module, install requirements
download_module_and_reqs(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_id=module_id,
extra_files=extra_files,
refresh_cache=refresh_cache,
use_gpt_index_import=use_gpt_index_import,
base_file_name=base_file_name,
override_path=override_path,
)
if skip_load:
return None
# loads the module into memory
if override_path:
path = f"{dirpath}/{base_file_name}"
spec = util.spec_from_file_location("custom_module", location=path)
if spec is None:
raise ValueError(f"Could not find file: {path}.")
else:
path = f"{dirpath}/{module_id}/{base_file_name}"
spec = util.spec_from_file_location("custom_module", location=path)
if spec is None:
raise ValueError(f"Could not find file: {path}.")
module = util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return getattr(module, module_class)
def track_download(module_class: str, module_type: str) -> None:
"""Tracks number of downloads via Llamahub proxy.
Args:
module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`.
module_type: Can be "loader", "tool", "llamapack", or "datasets"
"""
try:
requests.post(
LLAMAHUB_ANALYTICS_PROXY_SERVER,
json={"type": module_type, "plugin": module_class},
)
except Exception as e:
logger.info(f"Error tracking downloads for {module_class} : {e}")
| [
"llama_index.core.download.utils.initialize_directory",
"llama_index.core.download.utils.get_exports"
] | [((574, 601), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (591, 601), False, 'import logging\n'), ((5497, 5530), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5511, 5530), False, 'import os\n'), ((7469, 7537), 'llama_index.core.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7489, 7537), False, 'from llama_index.core.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8867, 8894), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8888, 8894), False, 'from importlib import util\n'), ((1134, 1154), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1138, 1154), False, 'from pathlib import Path\n'), ((1371, 1405), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1385, 1405), False, 'import os\n'), ((1875, 1906), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1885, 1906), False, 'import json\n'), ((2200, 2235), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2215, 2235), False, 'import os\n'), ((3068, 3088), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3072, 3088), False, 'from pathlib import Path\n'), ((3284, 3323), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3295, 3323), False, 'import os\n'), ((5086, 5119), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5100, 5119), False, 'import os\n'), ((8475, 8535), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'path'}), "('custom_module', location=path)\n", (8503, 8535), False, 'from importlib import util\n'), ((8705, 8765), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'path'}), "('custom_module', location=path)\n", (8733, 8765), False, 'from importlib import util\n'), ((9317, 9419), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9330, 9419), False, 'import requests\n'), ((1473, 1485), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1482, 1485), False, 'import json\n'), ((3247, 3274), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3261, 3274), False, 'import os\n'), ((4332, 4367), 'llama_index.core.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4343, 4367), False, 'from llama_index.core.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2293, 2326), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2307, 2326), False, 'import os\n'), ((2344, 2374), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2355, 2374), False, 'import os\n'), ((5867, 5959), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n requirements_path])\n", 
(5888, 5959), False, 'import subprocess\n'), ((4567, 4597), 'os.path.exists', 'os.path.exists', (['init_file_path'], {}), '(init_file_path)\n', (4581, 4597), False, 'import os\n'), ((5707, 5730), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5711, 5730), False, 'from pathlib import Path\n')] |
import qdrant_client
from llama_index import (
VectorStoreIndex,
ServiceContext,
)
from llama_index.llms import Ollama
from llama_index.vector_stores.qdrant import QdrantVectorStore
import llama_index
llama_index.set_global_handler("simple")
# re-initialize the vector store
client = qdrant_client.QdrantClient(
path="./qdrant_data"
)
vector_store = QdrantVectorStore(client=client, collection_name="tweets")
# get the LLM again
llm = Ollama(model="mixtral")
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
# load the index from the vector store
index = VectorStoreIndex.from_vector_store(
    vector_store=vector_store, service_context=service_context
)
query_engine = index.as_query_engine(similarity_top_k=20)
response = query_engine.query("Does the author like web frameworks? Give details.")
print(response)
| [
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.Ollama",
"llama_index.set_global_handler",
"llama_index.VectorStoreIndex.from_vector_store"
] | [((219, 259), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (249, 259), False, 'import llama_index\n'), ((306, 354), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'path': '"""./qdrant_data"""'}), "(path='./qdrant_data')\n", (332, 354), False, 'import qdrant_client\n'), ((379, 437), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'client', 'collection_name': '"""tweets"""'}), "(client=client, collection_name='tweets')\n", (396, 437), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((468, 491), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': '"""mixtral"""'}), "(model='mixtral')\n", (474, 491), False, 'from llama_index.llms import Ollama\n'), ((511, 569), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (539, 569), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((620, 718), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (654, 718), False, 'from llama_index import VectorStoreIndex, ServiceContext\n')] |
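# Counterpart sketch: one way the "tweets" collection above could have been populated,
# reusing the vector_store and service_context created in that snippet. The ./data
# directory and the use of SimpleDirectoryReader are assumptions for illustration.
from llama_index import SimpleDirectoryReader, StorageContext, VectorStoreIndex

documents = SimpleDirectoryReader("./data").load_data()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
    service_context=service_context,  # same Ollama + local-embedding setup as above
)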
"""General utils functions."""
import asyncio
import os
import random
import sys
import time
import traceback
import uuid
from contextlib import contextmanager
from dataclasses import dataclass
from functools import partial, wraps
from itertools import islice
from pathlib import Path
from typing import (
Any,
AsyncGenerator,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Protocol,
Set,
Type,
Union,
cast,
runtime_checkable,
)
class GlobalsHelper:
"""Helper to retrieve globals.
Helpful for global caching of certain variables that can be expensive to load.
(e.g. tokenization)
"""
_tokenizer: Optional[Callable[[str], List]] = None
_stopwords: Optional[List[str]] = None
@property
def tokenizer(self) -> Callable[[str], List]:
"""Get tokenizer. TODO: Deprecated."""
if self._tokenizer is None:
tiktoken_import_err = (
"`tiktoken` package not found, please run `pip install tiktoken`"
)
try:
import tiktoken
except ImportError:
raise ImportError(tiktoken_import_err)
enc = tiktoken.get_encoding("gpt2")
self._tokenizer = cast(Callable[[str], List], enc.encode)
self._tokenizer = partial(self._tokenizer, allowed_special="all")
return self._tokenizer # type: ignore
@property
def stopwords(self) -> List[str]:
"""Get stopwords."""
if self._stopwords is None:
try:
import nltk
from nltk.corpus import stopwords
except ImportError:
raise ImportError(
"`nltk` package not found, please run `pip install nltk`"
)
from llama_index.utils import get_cache_dir
cache_dir = get_cache_dir()
nltk_data_dir = os.environ.get("NLTK_DATA", cache_dir)
# update nltk path for nltk so that it finds the data
if nltk_data_dir not in nltk.data.path:
nltk.data.path.append(nltk_data_dir)
try:
nltk.data.find("corpora/stopwords")
except LookupError:
nltk.download("stopwords", download_dir=nltk_data_dir)
self._stopwords = stopwords.words("english")
return self._stopwords
globals_helper = GlobalsHelper()
# Global Tokenizer
@runtime_checkable
class Tokenizer(Protocol):
def encode(self, text: str, *args: Any, **kwargs: Any) -> List[Any]:
...
def set_global_tokenizer(tokenizer: Union[Tokenizer, Callable[[str], list]]) -> None:
import llama_index
if isinstance(tokenizer, Tokenizer):
llama_index.global_tokenizer = tokenizer.encode
else:
llama_index.global_tokenizer = tokenizer
def get_tokenizer() -> Callable[[str], List]:
import llama_index
if llama_index.global_tokenizer is None:
tiktoken_import_err = (
"`tiktoken` package not found, please run `pip install tiktoken`"
)
try:
import tiktoken
except ImportError:
raise ImportError(tiktoken_import_err)
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
tokenizer = partial(enc.encode, allowed_special="all")
set_global_tokenizer(tokenizer)
assert llama_index.global_tokenizer is not None
return llama_index.global_tokenizer
def get_new_id(d: Set) -> str:
"""Get a new ID."""
while True:
new_id = str(uuid.uuid4())
if new_id not in d:
break
return new_id
def get_new_int_id(d: Set) -> int:
"""Get a new integer ID."""
while True:
new_id = random.randint(0, sys.maxsize)
if new_id not in d:
break
return new_id
@contextmanager
def temp_set_attrs(obj: Any, **kwargs: Any) -> Generator:
"""Temporary setter.
Utility class for setting a temporary value for an attribute on a class.
Taken from: https://tinyurl.com/2p89xymh
"""
prev_values = {k: getattr(obj, k) for k in kwargs}
for k, v in kwargs.items():
setattr(obj, k, v)
try:
yield
finally:
for k, v in prev_values.items():
setattr(obj, k, v)
@dataclass
class ErrorToRetry:
"""Exception types that should be retried.
Args:
exception_cls (Type[Exception]): Class of exception.
        check_fn (Optional[Callable[[Any], bool]]):
A function that takes an exception instance as input and returns
whether to retry.
"""
exception_cls: Type[Exception]
check_fn: Optional[Callable[[Any], bool]] = None
def retry_on_exceptions_with_backoff(
lambda_fn: Callable,
errors_to_retry: List[ErrorToRetry],
max_tries: int = 10,
min_backoff_secs: float = 0.5,
max_backoff_secs: float = 60.0,
) -> Any:
"""Execute lambda function with retries and exponential backoff.
Args:
lambda_fn (Callable): Function to be called and output we want.
errors_to_retry (List[ErrorToRetry]): List of errors to retry.
At least one needs to be provided.
max_tries (int): Maximum number of tries, including the first. Defaults to 10.
min_backoff_secs (float): Minimum amount of backoff time between attempts.
Defaults to 0.5.
max_backoff_secs (float): Maximum amount of backoff time between attempts.
Defaults to 60.
"""
if not errors_to_retry:
raise ValueError("At least one error to retry needs to be provided")
error_checks = {
error_to_retry.exception_cls: error_to_retry.check_fn
for error_to_retry in errors_to_retry
}
exception_class_tuples = tuple(error_checks.keys())
backoff_secs = min_backoff_secs
tries = 0
while True:
try:
return lambda_fn()
except exception_class_tuples as e:
traceback.print_exc()
tries += 1
if tries >= max_tries:
raise
check_fn = error_checks.get(e.__class__)
if check_fn and not check_fn(e):
raise
time.sleep(backoff_secs)
backoff_secs = min(backoff_secs * 2, max_backoff_secs)
def truncate_text(text: str, max_length: int) -> str:
"""Truncate text to a maximum length."""
if len(text) <= max_length:
return text
return text[: max_length - 3] + "..."
def iter_batch(iterable: Union[Iterable, Generator], size: int) -> Iterable:
"""Iterate over an iterable in batches.
>>> list(iter_batch([1,2,3,4,5], 3))
[[1, 2, 3], [4, 5]]
"""
source_iter = iter(iterable)
while source_iter:
b = list(islice(source_iter, size))
if len(b) == 0:
break
yield b
def concat_dirs(dirname: str, basename: str) -> str:
"""
Append basename to dirname, avoiding backslashes when running on windows.
os.path.join(dirname, basename) will add a backslash before dirname if
basename does not end with a slash, so we make sure it does.
"""
dirname += "/" if dirname[-1] != "/" else ""
return os.path.join(dirname, basename)
def get_tqdm_iterable(items: Iterable, show_progress: bool, desc: str) -> Iterable:
"""
Optionally get a tqdm iterable. Ensures tqdm.auto is used.
"""
_iterator = items
if show_progress:
try:
from tqdm.auto import tqdm
return tqdm(items, desc=desc)
except ImportError:
pass
return _iterator
def count_tokens(text: str) -> int:
tokenizer = get_tokenizer()
tokens = tokenizer(text)
return len(tokens)
def get_transformer_tokenizer_fn(model_name: str) -> Callable[[str], List[str]]:
"""
Args:
model_name(str): the model name of the tokenizer.
For instance, fxmarty/tiny-llama-fast-tokenizer.
"""
try:
from transformers import AutoTokenizer
except ImportError:
raise ValueError(
"`transformers` package not found, please run `pip install transformers`"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
return tokenizer.tokenize
def get_cache_dir() -> str:
"""Locate a platform-appropriate cache directory for llama_index,
and create it if it doesn't yet exist.
"""
# User override
if "LLAMA_INDEX_CACHE_DIR" in os.environ:
path = Path(os.environ["LLAMA_INDEX_CACHE_DIR"])
# Linux, Unix, AIX, etc.
elif os.name == "posix" and sys.platform != "darwin":
path = Path("/tmp/llama_index")
# Mac OS
elif sys.platform == "darwin":
path = Path(os.path.expanduser("~"), "Library/Caches/llama_index")
# Windows (hopefully)
else:
local = os.environ.get("LOCALAPPDATA", None) or os.path.expanduser(
"~\\AppData\\Local"
)
path = Path(local, "llama_index")
if not os.path.exists(path):
os.makedirs(
path, exist_ok=True
) # prevents https://github.com/jerryjliu/llama_index/issues/7362
return str(path)
def add_sync_version(func: Any) -> Any:
"""Decorator for adding sync version of an async function. The sync version
is added as a function attribute to the original function, func.
Args:
func(Any): the async function for which a sync variant will be built.
"""
assert asyncio.iscoroutinefunction(func)
@wraps(func)
def _wrapper(*args: Any, **kwds: Any) -> Any:
return asyncio.get_event_loop().run_until_complete(func(*args, **kwds))
func.sync = _wrapper
return func
# Sample text from llama_index's readme
SAMPLE_TEXT = """
Context
LLMs are a phenomenal piece of technology for knowledge generation and reasoning.
They are pre-trained on large amounts of publicly available data.
How do we best augment LLMs with our own private data?
We need a comprehensive toolkit to help perform this data augmentation for LLMs.
Proposed Solution
That's where LlamaIndex comes in. LlamaIndex is a "data framework" to help
you build LLM apps. It provides the following tools:
Offers data connectors to ingest your existing data sources and data formats
(APIs, PDFs, docs, SQL, etc.)
Provides ways to structure your data (indices, graphs) so that this data can be
easily used with LLMs.
Provides an advanced retrieval/query interface over your data:
Feed in any LLM input prompt, get back retrieved context and knowledge-augmented output.
Allows easy integrations with your outer application framework
(e.g. with LangChain, Flask, Docker, ChatGPT, anything else).
LlamaIndex provides tools for both beginner users and advanced users.
Our high-level API allows beginner users to use LlamaIndex to ingest and
query their data in 5 lines of code. Our lower-level APIs allow advanced users to
customize and extend any module (data connectors, indices, retrievers, query engines,
reranking modules), to fit their needs.
"""
_LLAMA_INDEX_COLORS = {
"llama_pink": "38;2;237;90;200",
"llama_blue": "38;2;90;149;237",
"llama_turquoise": "38;2;11;159;203",
"llama_lavender": "38;2;155;135;227",
}
_ANSI_COLORS = {
"red": "31",
"green": "32",
"yellow": "33",
"blue": "34",
"magenta": "35",
"cyan": "36",
"pink": "38;5;200",
}
def get_color_mapping(
items: List[str], use_llama_index_colors: bool = True
) -> Dict[str, str]:
"""
Get a mapping of items to colors.
Args:
items (List[str]): List of items to be mapped to colors.
use_llama_index_colors (bool, optional): Flag to indicate
whether to use LlamaIndex colors or ANSI colors.
Defaults to True.
Returns:
Dict[str, str]: Mapping of items to colors.
"""
if use_llama_index_colors:
color_palette = _LLAMA_INDEX_COLORS
else:
color_palette = _ANSI_COLORS
colors = list(color_palette.keys())
return {item: colors[i % len(colors)] for i, item in enumerate(items)}
def _get_colored_text(text: str, color: str) -> str:
"""
Get the colored version of the input text.
Args:
text (str): Input text.
color (str): Color to be applied to the text.
Returns:
str: Colored version of the input text.
"""
all_colors = {**_LLAMA_INDEX_COLORS, **_ANSI_COLORS}
if color not in all_colors:
return f"\033[1;3m{text}\033[0m" # just bolded and italicized
color = all_colors[color]
return f"\033[1;3;{color}m{text}\033[0m"
def print_text(text: str, color: Optional[str] = None, end: str = "") -> None:
"""
Print the text with the specified color.
Args:
text (str): Text to be printed.
color (str, optional): Color to be applied to the text. Supported colors are:
llama_pink, llama_blue, llama_turquoise, llama_lavender,
red, green, yellow, blue, magenta, cyan, pink.
end (str, optional): String appended after the last character of the text.
Returns:
None
"""
text_to_print = _get_colored_text(text, color) if color is not None else text
print(text_to_print, end=end)
def infer_torch_device() -> str:
"""Infer the input to torch.device."""
    # torch is imported lazily: the first check raises NameError when torch is not
    # already in scope, and the except branch imports it for the checks below.
    try:
        has_cuda = torch.cuda.is_available()
    except NameError:
        import torch
        has_cuda = torch.cuda.is_available()
if has_cuda:
return "cuda"
if torch.backends.mps.is_available():
return "mps"
return "cpu"
def unit_generator(x: Any) -> Generator[Any, None, None]:
"""A function that returns a generator of a single element.
Args:
x (Any): the element to build yield
Yields:
Any: the single element
"""
yield x
async def async_unit_generator(x: Any) -> AsyncGenerator[Any, None]:
"""A function that returns a generator of a single element.
Args:
x (Any): the element to build yield
Yields:
Any: the single element
"""
yield x
| [
"llama_index.utils.get_cache_dir"
] | [((7192, 7223), 'os.path.join', 'os.path.join', (['dirname', 'basename'], {}), '(dirname, basename)\n', (7204, 7223), False, 'import os\n'), ((8174, 8215), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {}), '(model_name)\n', (8203, 8215), False, 'from transformers import AutoTokenizer\n'), ((9451, 9484), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['func'], {}), '(func)\n', (9478, 9484), False, 'import asyncio\n'), ((9491, 9502), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (9496, 9502), False, 'from functools import partial, wraps\n'), ((13466, 13499), 'torch.backends.mps.is_available', 'torch.backends.mps.is_available', ([], {}), '()\n', (13497, 13499), False, 'import torch\n'), ((3225, 3269), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-3.5-turbo"""'], {}), "('gpt-3.5-turbo')\n", (3252, 3269), False, 'import tiktoken\n'), ((3290, 3332), 'functools.partial', 'partial', (['enc.encode'], {'allowed_special': '"""all"""'}), "(enc.encode, allowed_special='all')\n", (3297, 3332), False, 'from functools import partial, wraps\n'), ((3740, 3770), 'random.randint', 'random.randint', (['(0)', 'sys.maxsize'], {}), '(0, sys.maxsize)\n', (3754, 3770), False, 'import random\n'), ((8478, 8519), 'pathlib.Path', 'Path', (["os.environ['LLAMA_INDEX_CACHE_DIR']"], {}), "(os.environ['LLAMA_INDEX_CACHE_DIR'])\n", (8482, 8519), False, 'from pathlib import Path\n'), ((8981, 9001), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8995, 9001), False, 'import os\n'), ((9011, 9043), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (9022, 9043), False, 'import os\n'), ((13305, 13330), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13328, 13330), False, 'import torch\n'), ((1200, 1229), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['"""gpt2"""'], {}), "('gpt2')\n", (1221, 1229), False, 'import tiktoken\n'), ((1260, 1299), 'typing.cast', 'cast', (['Callable[[str], List]', 'enc.encode'], {}), '(Callable[[str], List], enc.encode)\n', (1264, 1299), False, 'from typing import Any, AsyncGenerator, Callable, Dict, Generator, Iterable, List, Optional, Protocol, Set, Type, Union, cast, runtime_checkable\n'), ((1330, 1377), 'functools.partial', 'partial', (['self._tokenizer'], {'allowed_special': '"""all"""'}), "(self._tokenizer, allowed_special='all')\n", (1337, 1377), False, 'from functools import partial, wraps\n'), ((1883, 1898), 'llama_index.utils.get_cache_dir', 'get_cache_dir', ([], {}), '()\n', (1896, 1898), False, 'from llama_index.utils import get_cache_dir\n'), ((1927, 1965), 'os.environ.get', 'os.environ.get', (['"""NLTK_DATA"""', 'cache_dir'], {}), "('NLTK_DATA', cache_dir)\n", (1941, 1965), False, 'import os\n'), ((2341, 2367), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2356, 2367), False, 'from nltk.corpus import stopwords\n'), ((3560, 3572), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3570, 3572), False, 'import uuid\n'), ((6757, 6782), 'itertools.islice', 'islice', (['source_iter', 'size'], {}), '(source_iter, size)\n', (6763, 6782), False, 'from itertools import islice\n'), ((7505, 7527), 'tqdm.auto.tqdm', 'tqdm', (['items'], {'desc': 'desc'}), '(items, desc=desc)\n', (7509, 7527), False, 'from tqdm.auto import tqdm\n'), ((8623, 8647), 'pathlib.Path', 'Path', (['"""/tmp/llama_index"""'], {}), "('/tmp/llama_index')\n", (8627, 8647), False, 'from pathlib import 
Path\n'), ((13394, 13419), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13417, 13419), False, 'import torch\n'), ((2101, 2137), 'nltk.data.path.append', 'nltk.data.path.append', (['nltk_data_dir'], {}), '(nltk_data_dir)\n', (2122, 2137), False, 'import nltk\n'), ((2172, 2207), 'nltk.data.find', 'nltk.data.find', (['"""corpora/stopwords"""'], {}), "('corpora/stopwords')\n", (2186, 2207), False, 'import nltk\n'), ((5966, 5987), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5985, 5987), False, 'import traceback\n'), ((6200, 6224), 'time.sleep', 'time.sleep', (['backoff_secs'], {}), '(backoff_secs)\n', (6210, 6224), False, 'import time\n'), ((8942, 8968), 'pathlib.Path', 'Path', (['local', '"""llama_index"""'], {}), "(local, 'llama_index')\n", (8946, 8968), False, 'from pathlib import Path\n'), ((9568, 9592), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (9590, 9592), False, 'import asyncio\n'), ((2256, 2310), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {'download_dir': 'nltk_data_dir'}), "('stopwords', download_dir=nltk_data_dir)\n", (2269, 2310), False, 'import nltk\n'), ((8717, 8740), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (8735, 8740), False, 'import os\n'), ((8825, 8861), 'os.environ.get', 'os.environ.get', (['"""LOCALAPPDATA"""', 'None'], {}), "('LOCALAPPDATA', None)\n", (8839, 8861), False, 'import os\n'), ((8865, 8904), 'os.path.expanduser', 'os.path.expanduser', (['"""~\\\\AppData\\\\Local"""'], {}), "('~\\\\AppData\\\\Local')\n", (8883, 8904), False, 'import os\n')] |
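# A minimal, hypothetical usage sketch for the general-purpose helpers defined
# in the utils module annotated above. It assumes these helpers are importable
# from `llama_index.utils`, where the upstream versions of these utilities
# live; adjust the import path if this module is vendored elsewhere.
from llama_index.utils import (
    ErrorToRetry,
    iter_batch,
    retry_on_exceptions_with_backoff,
    truncate_text,
)

# Batch an iterable into chunks of three (matches the iter_batch doctest).
assert list(iter_batch([1, 2, 3, 4, 5], 3)) == [[1, 2, 3], [4, 5]]

# Truncate a long string to at most 20 characters, ellipsis included.
print(truncate_text("a rather long piece of sample text", 20))

# Retry a flaky callable with exponential backoff, but only on ConnectionError.
def fetch() -> str:
    return "ok"

result = retry_on_exceptions_with_backoff(
    fetch,
    errors_to_retry=[ErrorToRetry(ConnectionError)],
    max_tries=3,
    min_backoff_secs=0.5,
)
print(result)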
"""General utils functions."""
import asyncio
import os
import random
import sys
import time
import traceback
import uuid
from contextlib import contextmanager
from dataclasses import dataclass
from functools import partial, wraps
from itertools import islice
from pathlib import Path
from typing import (
Any,
AsyncGenerator,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Protocol,
Set,
Type,
Union,
cast,
runtime_checkable,
)
class GlobalsHelper:
"""Helper to retrieve globals.
Helpful for global caching of certain variables that can be expensive to load.
(e.g. tokenization)
"""
_tokenizer: Optional[Callable[[str], List]] = None
_stopwords: Optional[List[str]] = None
@property
def tokenizer(self) -> Callable[[str], List]:
"""Get tokenizer. TODO: Deprecated."""
if self._tokenizer is None:
tiktoken_import_err = (
"`tiktoken` package not found, please run `pip install tiktoken`"
)
try:
import tiktoken
except ImportError:
raise ImportError(tiktoken_import_err)
enc = tiktoken.get_encoding("gpt2")
self._tokenizer = cast(Callable[[str], List], enc.encode)
self._tokenizer = partial(self._tokenizer, allowed_special="all")
return self._tokenizer # type: ignore
@property
def stopwords(self) -> List[str]:
"""Get stopwords."""
if self._stopwords is None:
try:
import nltk
from nltk.corpus import stopwords
except ImportError:
raise ImportError(
"`nltk` package not found, please run `pip install nltk`"
)
from llama_index.utils import get_cache_dir
cache_dir = get_cache_dir()
nltk_data_dir = os.environ.get("NLTK_DATA", cache_dir)
# update nltk path for nltk so that it finds the data
if nltk_data_dir not in nltk.data.path:
nltk.data.path.append(nltk_data_dir)
try:
nltk.data.find("corpora/stopwords")
except LookupError:
nltk.download("stopwords", download_dir=nltk_data_dir)
self._stopwords = stopwords.words("english")
return self._stopwords
globals_helper = GlobalsHelper()
# Global Tokenizer
@runtime_checkable
class Tokenizer(Protocol):
def encode(self, text: str, *args: Any, **kwargs: Any) -> List[Any]:
...
def set_global_tokenizer(tokenizer: Union[Tokenizer, Callable[[str], list]]) -> None:
import llama_index
if isinstance(tokenizer, Tokenizer):
llama_index.global_tokenizer = tokenizer.encode
else:
llama_index.global_tokenizer = tokenizer
def get_tokenizer() -> Callable[[str], List]:
import llama_index
if llama_index.global_tokenizer is None:
tiktoken_import_err = (
"`tiktoken` package not found, please run `pip install tiktoken`"
)
try:
import tiktoken
except ImportError:
raise ImportError(tiktoken_import_err)
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
tokenizer = partial(enc.encode, allowed_special="all")
set_global_tokenizer(tokenizer)
assert llama_index.global_tokenizer is not None
return llama_index.global_tokenizer
def get_new_id(d: Set) -> str:
"""Get a new ID."""
while True:
new_id = str(uuid.uuid4())
if new_id not in d:
break
return new_id
def get_new_int_id(d: Set) -> int:
"""Get a new integer ID."""
while True:
new_id = random.randint(0, sys.maxsize)
if new_id not in d:
break
return new_id
@contextmanager
def temp_set_attrs(obj: Any, **kwargs: Any) -> Generator:
"""Temporary setter.
Utility class for setting a temporary value for an attribute on a class.
Taken from: https://tinyurl.com/2p89xymh
"""
prev_values = {k: getattr(obj, k) for k in kwargs}
for k, v in kwargs.items():
setattr(obj, k, v)
try:
yield
finally:
for k, v in prev_values.items():
setattr(obj, k, v)
@dataclass
class ErrorToRetry:
"""Exception types that should be retried.
Args:
exception_cls (Type[Exception]): Class of exception.
check_fn (Optional[Callable[[Any]], bool]]):
A function that takes an exception instance as input and returns
whether to retry.
"""
exception_cls: Type[Exception]
check_fn: Optional[Callable[[Any], bool]] = None
def retry_on_exceptions_with_backoff(
lambda_fn: Callable,
errors_to_retry: List[ErrorToRetry],
max_tries: int = 10,
min_backoff_secs: float = 0.5,
max_backoff_secs: float = 60.0,
) -> Any:
"""Execute lambda function with retries and exponential backoff.
Args:
lambda_fn (Callable): Function to be called and output we want.
errors_to_retry (List[ErrorToRetry]): List of errors to retry.
At least one needs to be provided.
max_tries (int): Maximum number of tries, including the first. Defaults to 10.
min_backoff_secs (float): Minimum amount of backoff time between attempts.
Defaults to 0.5.
max_backoff_secs (float): Maximum amount of backoff time between attempts.
Defaults to 60.
"""
if not errors_to_retry:
raise ValueError("At least one error to retry needs to be provided")
error_checks = {
error_to_retry.exception_cls: error_to_retry.check_fn
for error_to_retry in errors_to_retry
}
exception_class_tuples = tuple(error_checks.keys())
backoff_secs = min_backoff_secs
tries = 0
while True:
try:
return lambda_fn()
except exception_class_tuples as e:
traceback.print_exc()
tries += 1
if tries >= max_tries:
raise
check_fn = error_checks.get(e.__class__)
if check_fn and not check_fn(e):
raise
time.sleep(backoff_secs)
backoff_secs = min(backoff_secs * 2, max_backoff_secs)
def truncate_text(text: str, max_length: int) -> str:
"""Truncate text to a maximum length."""
if len(text) <= max_length:
return text
return text[: max_length - 3] + "..."
def iter_batch(iterable: Union[Iterable, Generator], size: int) -> Iterable:
"""Iterate over an iterable in batches.
>>> list(iter_batch([1,2,3,4,5], 3))
[[1, 2, 3], [4, 5]]
"""
source_iter = iter(iterable)
while source_iter:
b = list(islice(source_iter, size))
if len(b) == 0:
break
yield b
def concat_dirs(dirname: str, basename: str) -> str:
"""
Append basename to dirname, avoiding backslashes when running on windows.
os.path.join(dirname, basename) will add a backslash before dirname if
basename does not end with a slash, so we make sure it does.
"""
dirname += "/" if dirname[-1] != "/" else ""
return os.path.join(dirname, basename)
def get_tqdm_iterable(items: Iterable, show_progress: bool, desc: str) -> Iterable:
"""
Optionally get a tqdm iterable. Ensures tqdm.auto is used.
"""
_iterator = items
if show_progress:
try:
from tqdm.auto import tqdm
return tqdm(items, desc=desc)
except ImportError:
pass
return _iterator
def count_tokens(text: str) -> int:
tokenizer = get_tokenizer()
tokens = tokenizer(text)
return len(tokens)
def get_transformer_tokenizer_fn(model_name: str) -> Callable[[str], List[str]]:
"""
Args:
model_name(str): the model name of the tokenizer.
For instance, fxmarty/tiny-llama-fast-tokenizer.
"""
try:
from transformers import AutoTokenizer
except ImportError:
raise ValueError(
"`transformers` package not found, please run `pip install transformers`"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
return tokenizer.tokenize
def get_cache_dir() -> str:
"""Locate a platform-appropriate cache directory for llama_index,
and create it if it doesn't yet exist.
"""
# User override
if "LLAMA_INDEX_CACHE_DIR" in os.environ:
path = Path(os.environ["LLAMA_INDEX_CACHE_DIR"])
# Linux, Unix, AIX, etc.
elif os.name == "posix" and sys.platform != "darwin":
path = Path("/tmp/llama_index")
# Mac OS
elif sys.platform == "darwin":
path = Path(os.path.expanduser("~"), "Library/Caches/llama_index")
# Windows (hopefully)
else:
local = os.environ.get("LOCALAPPDATA", None) or os.path.expanduser(
"~\\AppData\\Local"
)
path = Path(local, "llama_index")
if not os.path.exists(path):
os.makedirs(
path, exist_ok=True
) # prevents https://github.com/jerryjliu/llama_index/issues/7362
return str(path)
def add_sync_version(func: Any) -> Any:
"""Decorator for adding sync version of an async function. The sync version
is added as a function attribute to the original function, func.
Args:
func(Any): the async function for which a sync variant will be built.
"""
assert asyncio.iscoroutinefunction(func)
@wraps(func)
def _wrapper(*args: Any, **kwds: Any) -> Any:
return asyncio.get_event_loop().run_until_complete(func(*args, **kwds))
func.sync = _wrapper
return func
# Sample text from llama_index's readme
SAMPLE_TEXT = """
Context
LLMs are a phenomenal piece of technology for knowledge generation and reasoning.
They are pre-trained on large amounts of publicly available data.
How do we best augment LLMs with our own private data?
We need a comprehensive toolkit to help perform this data augmentation for LLMs.
Proposed Solution
That's where LlamaIndex comes in. LlamaIndex is a "data framework" to help
you build LLM apps. It provides the following tools:
Offers data connectors to ingest your existing data sources and data formats
(APIs, PDFs, docs, SQL, etc.)
Provides ways to structure your data (indices, graphs) so that this data can be
easily used with LLMs.
Provides an advanced retrieval/query interface over your data:
Feed in any LLM input prompt, get back retrieved context and knowledge-augmented output.
Allows easy integrations with your outer application framework
(e.g. with LangChain, Flask, Docker, ChatGPT, anything else).
LlamaIndex provides tools for both beginner users and advanced users.
Our high-level API allows beginner users to use LlamaIndex to ingest and
query their data in 5 lines of code. Our lower-level APIs allow advanced users to
customize and extend any module (data connectors, indices, retrievers, query engines,
reranking modules), to fit their needs.
"""
_LLAMA_INDEX_COLORS = {
"llama_pink": "38;2;237;90;200",
"llama_blue": "38;2;90;149;237",
"llama_turquoise": "38;2;11;159;203",
"llama_lavender": "38;2;155;135;227",
}
_ANSI_COLORS = {
"red": "31",
"green": "32",
"yellow": "33",
"blue": "34",
"magenta": "35",
"cyan": "36",
"pink": "38;5;200",
}
def get_color_mapping(
items: List[str], use_llama_index_colors: bool = True
) -> Dict[str, str]:
"""
Get a mapping of items to colors.
Args:
items (List[str]): List of items to be mapped to colors.
use_llama_index_colors (bool, optional): Flag to indicate
whether to use LlamaIndex colors or ANSI colors.
Defaults to True.
Returns:
Dict[str, str]: Mapping of items to colors.
"""
if use_llama_index_colors:
color_palette = _LLAMA_INDEX_COLORS
else:
color_palette = _ANSI_COLORS
colors = list(color_palette.keys())
return {item: colors[i % len(colors)] for i, item in enumerate(items)}
def _get_colored_text(text: str, color: str) -> str:
"""
Get the colored version of the input text.
Args:
text (str): Input text.
color (str): Color to be applied to the text.
Returns:
str: Colored version of the input text.
"""
all_colors = {**_LLAMA_INDEX_COLORS, **_ANSI_COLORS}
if color not in all_colors:
return f"\033[1;3m{text}\033[0m" # just bolded and italicized
color = all_colors[color]
return f"\033[1;3;{color}m{text}\033[0m"
def print_text(text: str, color: Optional[str] = None, end: str = "") -> None:
"""
Print the text with the specified color.
Args:
text (str): Text to be printed.
color (str, optional): Color to be applied to the text. Supported colors are:
llama_pink, llama_blue, llama_turquoise, llama_lavender,
red, green, yellow, blue, magenta, cyan, pink.
end (str, optional): String appended after the last character of the text.
Returns:
None
"""
text_to_print = _get_colored_text(text, color) if color is not None else text
print(text_to_print, end=end)
def infer_torch_device() -> str:
"""Infer the input to torch.device."""
try:
has_cuda = torch.cuda.is_available()
except NameError:
import torch
has_cuda = torch.cuda.is_available()
if has_cuda:
return "cuda"
if torch.backends.mps.is_available():
return "mps"
return "cpu"
def unit_generator(x: Any) -> Generator[Any, None, None]:
"""A function that returns a generator of a single element.
Args:
x (Any): the element to build yield
Yields:
Any: the single element
"""
yield x
async def async_unit_generator(x: Any) -> AsyncGenerator[Any, None]:
"""A function that returns a generator of a single element.
Args:
x (Any): the element to build yield
Yields:
Any: the single element
"""
yield x
| [
"llama_index.utils.get_cache_dir"
] | [((7192, 7223), 'os.path.join', 'os.path.join', (['dirname', 'basename'], {}), '(dirname, basename)\n', (7204, 7223), False, 'import os\n'), ((8174, 8215), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {}), '(model_name)\n', (8203, 8215), False, 'from transformers import AutoTokenizer\n'), ((9451, 9484), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['func'], {}), '(func)\n', (9478, 9484), False, 'import asyncio\n'), ((9491, 9502), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (9496, 9502), False, 'from functools import partial, wraps\n'), ((13466, 13499), 'torch.backends.mps.is_available', 'torch.backends.mps.is_available', ([], {}), '()\n', (13497, 13499), False, 'import torch\n'), ((3225, 3269), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-3.5-turbo"""'], {}), "('gpt-3.5-turbo')\n", (3252, 3269), False, 'import tiktoken\n'), ((3290, 3332), 'functools.partial', 'partial', (['enc.encode'], {'allowed_special': '"""all"""'}), "(enc.encode, allowed_special='all')\n", (3297, 3332), False, 'from functools import partial, wraps\n'), ((3740, 3770), 'random.randint', 'random.randint', (['(0)', 'sys.maxsize'], {}), '(0, sys.maxsize)\n', (3754, 3770), False, 'import random\n'), ((8478, 8519), 'pathlib.Path', 'Path', (["os.environ['LLAMA_INDEX_CACHE_DIR']"], {}), "(os.environ['LLAMA_INDEX_CACHE_DIR'])\n", (8482, 8519), False, 'from pathlib import Path\n'), ((8981, 9001), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8995, 9001), False, 'import os\n'), ((9011, 9043), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (9022, 9043), False, 'import os\n'), ((13305, 13330), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13328, 13330), False, 'import torch\n'), ((1200, 1229), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['"""gpt2"""'], {}), "('gpt2')\n", (1221, 1229), False, 'import tiktoken\n'), ((1260, 1299), 'typing.cast', 'cast', (['Callable[[str], List]', 'enc.encode'], {}), '(Callable[[str], List], enc.encode)\n', (1264, 1299), False, 'from typing import Any, AsyncGenerator, Callable, Dict, Generator, Iterable, List, Optional, Protocol, Set, Type, Union, cast, runtime_checkable\n'), ((1330, 1377), 'functools.partial', 'partial', (['self._tokenizer'], {'allowed_special': '"""all"""'}), "(self._tokenizer, allowed_special='all')\n", (1337, 1377), False, 'from functools import partial, wraps\n'), ((1883, 1898), 'llama_index.utils.get_cache_dir', 'get_cache_dir', ([], {}), '()\n', (1896, 1898), False, 'from llama_index.utils import get_cache_dir\n'), ((1927, 1965), 'os.environ.get', 'os.environ.get', (['"""NLTK_DATA"""', 'cache_dir'], {}), "('NLTK_DATA', cache_dir)\n", (1941, 1965), False, 'import os\n'), ((2341, 2367), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2356, 2367), False, 'from nltk.corpus import stopwords\n'), ((3560, 3572), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3570, 3572), False, 'import uuid\n'), ((6757, 6782), 'itertools.islice', 'islice', (['source_iter', 'size'], {}), '(source_iter, size)\n', (6763, 6782), False, 'from itertools import islice\n'), ((7505, 7527), 'tqdm.auto.tqdm', 'tqdm', (['items'], {'desc': 'desc'}), '(items, desc=desc)\n', (7509, 7527), False, 'from tqdm.auto import tqdm\n'), ((8623, 8647), 'pathlib.Path', 'Path', (['"""/tmp/llama_index"""'], {}), "('/tmp/llama_index')\n", (8627, 8647), False, 'from pathlib import 
Path\n'), ((13394, 13419), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13417, 13419), False, 'import torch\n'), ((2101, 2137), 'nltk.data.path.append', 'nltk.data.path.append', (['nltk_data_dir'], {}), '(nltk_data_dir)\n', (2122, 2137), False, 'import nltk\n'), ((2172, 2207), 'nltk.data.find', 'nltk.data.find', (['"""corpora/stopwords"""'], {}), "('corpora/stopwords')\n", (2186, 2207), False, 'import nltk\n'), ((5966, 5987), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5985, 5987), False, 'import traceback\n'), ((6200, 6224), 'time.sleep', 'time.sleep', (['backoff_secs'], {}), '(backoff_secs)\n', (6210, 6224), False, 'import time\n'), ((8942, 8968), 'pathlib.Path', 'Path', (['local', '"""llama_index"""'], {}), "(local, 'llama_index')\n", (8946, 8968), False, 'from pathlib import Path\n'), ((9568, 9592), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (9590, 9592), False, 'import asyncio\n'), ((2256, 2310), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {'download_dir': 'nltk_data_dir'}), "('stopwords', download_dir=nltk_data_dir)\n", (2269, 2310), False, 'import nltk\n'), ((8717, 8740), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (8735, 8740), False, 'import os\n'), ((8825, 8861), 'os.environ.get', 'os.environ.get', (['"""LOCALAPPDATA"""', 'None'], {}), "('LOCALAPPDATA', None)\n", (8839, 8861), False, 'import os\n'), ((8865, 8904), 'os.path.expanduser', 'os.path.expanduser', (['"""~\\\\AppData\\\\Local"""'], {}), "('~\\\\AppData\\\\Local')\n", (8883, 8904), False, 'import os\n')] |
"""General utils functions."""
import asyncio
import os
import random
import sys
import time
import traceback
import uuid
from contextlib import contextmanager
from dataclasses import dataclass
from functools import partial, wraps
from itertools import islice
from pathlib import Path
from typing import (
Any,
AsyncGenerator,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Protocol,
Set,
Type,
Union,
cast,
runtime_checkable,
)
class GlobalsHelper:
"""Helper to retrieve globals.
Helpful for global caching of certain variables that can be expensive to load.
(e.g. tokenization)
"""
_tokenizer: Optional[Callable[[str], List]] = None
_stopwords: Optional[List[str]] = None
@property
def tokenizer(self) -> Callable[[str], List]:
"""Get tokenizer. TODO: Deprecated."""
if self._tokenizer is None:
tiktoken_import_err = (
"`tiktoken` package not found, please run `pip install tiktoken`"
)
try:
import tiktoken
except ImportError:
raise ImportError(tiktoken_import_err)
enc = tiktoken.get_encoding("gpt2")
self._tokenizer = cast(Callable[[str], List], enc.encode)
self._tokenizer = partial(self._tokenizer, allowed_special="all")
return self._tokenizer # type: ignore
@property
def stopwords(self) -> List[str]:
"""Get stopwords."""
if self._stopwords is None:
try:
import nltk
from nltk.corpus import stopwords
except ImportError:
raise ImportError(
"`nltk` package not found, please run `pip install nltk`"
)
from llama_index.utils import get_cache_dir
cache_dir = get_cache_dir()
nltk_data_dir = os.environ.get("NLTK_DATA", cache_dir)
# update nltk path for nltk so that it finds the data
if nltk_data_dir not in nltk.data.path:
nltk.data.path.append(nltk_data_dir)
try:
nltk.data.find("corpora/stopwords")
except LookupError:
nltk.download("stopwords", download_dir=nltk_data_dir)
self._stopwords = stopwords.words("english")
return self._stopwords
globals_helper = GlobalsHelper()
# Global Tokenizer
@runtime_checkable
class Tokenizer(Protocol):
def encode(self, text: str, *args: Any, **kwargs: Any) -> List[Any]:
...
def set_global_tokenizer(tokenizer: Union[Tokenizer, Callable[[str], list]]) -> None:
import llama_index
if isinstance(tokenizer, Tokenizer):
llama_index.global_tokenizer = tokenizer.encode
else:
llama_index.global_tokenizer = tokenizer
def get_tokenizer() -> Callable[[str], List]:
import llama_index
if llama_index.global_tokenizer is None:
tiktoken_import_err = (
"`tiktoken` package not found, please run `pip install tiktoken`"
)
try:
import tiktoken
except ImportError:
raise ImportError(tiktoken_import_err)
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
tokenizer = partial(enc.encode, allowed_special="all")
set_global_tokenizer(tokenizer)
assert llama_index.global_tokenizer is not None
return llama_index.global_tokenizer
def get_new_id(d: Set) -> str:
"""Get a new ID."""
while True:
new_id = str(uuid.uuid4())
if new_id not in d:
break
return new_id
def get_new_int_id(d: Set) -> int:
"""Get a new integer ID."""
while True:
new_id = random.randint(0, sys.maxsize)
if new_id not in d:
break
return new_id
@contextmanager
def temp_set_attrs(obj: Any, **kwargs: Any) -> Generator:
"""Temporary setter.
Utility class for setting a temporary value for an attribute on a class.
Taken from: https://tinyurl.com/2p89xymh
"""
prev_values = {k: getattr(obj, k) for k in kwargs}
for k, v in kwargs.items():
setattr(obj, k, v)
try:
yield
finally:
for k, v in prev_values.items():
setattr(obj, k, v)
@dataclass
class ErrorToRetry:
"""Exception types that should be retried.
Args:
exception_cls (Type[Exception]): Class of exception.
check_fn (Optional[Callable[[Any]], bool]]):
A function that takes an exception instance as input and returns
whether to retry.
"""
exception_cls: Type[Exception]
check_fn: Optional[Callable[[Any], bool]] = None
def retry_on_exceptions_with_backoff(
lambda_fn: Callable,
errors_to_retry: List[ErrorToRetry],
max_tries: int = 10,
min_backoff_secs: float = 0.5,
max_backoff_secs: float = 60.0,
) -> Any:
"""Execute lambda function with retries and exponential backoff.
Args:
lambda_fn (Callable): Function to be called and output we want.
errors_to_retry (List[ErrorToRetry]): List of errors to retry.
At least one needs to be provided.
max_tries (int): Maximum number of tries, including the first. Defaults to 10.
min_backoff_secs (float): Minimum amount of backoff time between attempts.
Defaults to 0.5.
max_backoff_secs (float): Maximum amount of backoff time between attempts.
Defaults to 60.
"""
if not errors_to_retry:
raise ValueError("At least one error to retry needs to be provided")
error_checks = {
error_to_retry.exception_cls: error_to_retry.check_fn
for error_to_retry in errors_to_retry
}
exception_class_tuples = tuple(error_checks.keys())
backoff_secs = min_backoff_secs
tries = 0
while True:
try:
return lambda_fn()
except exception_class_tuples as e:
traceback.print_exc()
tries += 1
if tries >= max_tries:
raise
check_fn = error_checks.get(e.__class__)
if check_fn and not check_fn(e):
raise
time.sleep(backoff_secs)
backoff_secs = min(backoff_secs * 2, max_backoff_secs)
def truncate_text(text: str, max_length: int) -> str:
"""Truncate text to a maximum length."""
if len(text) <= max_length:
return text
return text[: max_length - 3] + "..."
def iter_batch(iterable: Union[Iterable, Generator], size: int) -> Iterable:
"""Iterate over an iterable in batches.
>>> list(iter_batch([1,2,3,4,5], 3))
[[1, 2, 3], [4, 5]]
"""
source_iter = iter(iterable)
while source_iter:
b = list(islice(source_iter, size))
if len(b) == 0:
break
yield b
def concat_dirs(dirname: str, basename: str) -> str:
"""
Append basename to dirname, avoiding backslashes when running on windows.
os.path.join(dirname, basename) will add a backslash before dirname if
basename does not end with a slash, so we make sure it does.
"""
dirname += "/" if dirname[-1] != "/" else ""
return os.path.join(dirname, basename)
def get_tqdm_iterable(items: Iterable, show_progress: bool, desc: str) -> Iterable:
"""
Optionally get a tqdm iterable. Ensures tqdm.auto is used.
"""
_iterator = items
if show_progress:
try:
from tqdm.auto import tqdm
return tqdm(items, desc=desc)
except ImportError:
pass
return _iterator
def count_tokens(text: str) -> int:
tokenizer = get_tokenizer()
tokens = tokenizer(text)
return len(tokens)
def get_transformer_tokenizer_fn(model_name: str) -> Callable[[str], List[str]]:
"""
Args:
model_name(str): the model name of the tokenizer.
For instance, fxmarty/tiny-llama-fast-tokenizer.
"""
try:
from transformers import AutoTokenizer
except ImportError:
raise ValueError(
"`transformers` package not found, please run `pip install transformers`"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
return tokenizer.tokenize
def get_cache_dir() -> str:
"""Locate a platform-appropriate cache directory for llama_index,
and create it if it doesn't yet exist.
"""
# User override
if "LLAMA_INDEX_CACHE_DIR" in os.environ:
path = Path(os.environ["LLAMA_INDEX_CACHE_DIR"])
# Linux, Unix, AIX, etc.
elif os.name == "posix" and sys.platform != "darwin":
path = Path("/tmp/llama_index")
# Mac OS
elif sys.platform == "darwin":
path = Path(os.path.expanduser("~"), "Library/Caches/llama_index")
# Windows (hopefully)
else:
local = os.environ.get("LOCALAPPDATA", None) or os.path.expanduser(
"~\\AppData\\Local"
)
path = Path(local, "llama_index")
if not os.path.exists(path):
os.makedirs(
path, exist_ok=True
) # prevents https://github.com/jerryjliu/llama_index/issues/7362
return str(path)
def add_sync_version(func: Any) -> Any:
"""Decorator for adding sync version of an async function. The sync version
is added as a function attribute to the original function, func.
Args:
func(Any): the async function for which a sync variant will be built.
"""
assert asyncio.iscoroutinefunction(func)
@wraps(func)
def _wrapper(*args: Any, **kwds: Any) -> Any:
return asyncio.get_event_loop().run_until_complete(func(*args, **kwds))
func.sync = _wrapper
return func
# Sample text from llama_index's readme
SAMPLE_TEXT = """
Context
LLMs are a phenomenal piece of technology for knowledge generation and reasoning.
They are pre-trained on large amounts of publicly available data.
How do we best augment LLMs with our own private data?
We need a comprehensive toolkit to help perform this data augmentation for LLMs.
Proposed Solution
That's where LlamaIndex comes in. LlamaIndex is a "data framework" to help
you build LLM apps. It provides the following tools:
Offers data connectors to ingest your existing data sources and data formats
(APIs, PDFs, docs, SQL, etc.)
Provides ways to structure your data (indices, graphs) so that this data can be
easily used with LLMs.
Provides an advanced retrieval/query interface over your data:
Feed in any LLM input prompt, get back retrieved context and knowledge-augmented output.
Allows easy integrations with your outer application framework
(e.g. with LangChain, Flask, Docker, ChatGPT, anything else).
LlamaIndex provides tools for both beginner users and advanced users.
Our high-level API allows beginner users to use LlamaIndex to ingest and
query their data in 5 lines of code. Our lower-level APIs allow advanced users to
customize and extend any module (data connectors, indices, retrievers, query engines,
reranking modules), to fit their needs.
"""
_LLAMA_INDEX_COLORS = {
"llama_pink": "38;2;237;90;200",
"llama_blue": "38;2;90;149;237",
"llama_turquoise": "38;2;11;159;203",
"llama_lavender": "38;2;155;135;227",
}
_ANSI_COLORS = {
"red": "31",
"green": "32",
"yellow": "33",
"blue": "34",
"magenta": "35",
"cyan": "36",
"pink": "38;5;200",
}
def get_color_mapping(
items: List[str], use_llama_index_colors: bool = True
) -> Dict[str, str]:
"""
Get a mapping of items to colors.
Args:
items (List[str]): List of items to be mapped to colors.
use_llama_index_colors (bool, optional): Flag to indicate
whether to use LlamaIndex colors or ANSI colors.
Defaults to True.
Returns:
Dict[str, str]: Mapping of items to colors.
"""
if use_llama_index_colors:
color_palette = _LLAMA_INDEX_COLORS
else:
color_palette = _ANSI_COLORS
colors = list(color_palette.keys())
return {item: colors[i % len(colors)] for i, item in enumerate(items)}
def _get_colored_text(text: str, color: str) -> str:
"""
Get the colored version of the input text.
Args:
text (str): Input text.
color (str): Color to be applied to the text.
Returns:
str: Colored version of the input text.
"""
all_colors = {**_LLAMA_INDEX_COLORS, **_ANSI_COLORS}
if color not in all_colors:
return f"\033[1;3m{text}\033[0m" # just bolded and italicized
color = all_colors[color]
return f"\033[1;3;{color}m{text}\033[0m"
def print_text(text: str, color: Optional[str] = None, end: str = "") -> None:
"""
Print the text with the specified color.
Args:
text (str): Text to be printed.
color (str, optional): Color to be applied to the text. Supported colors are:
llama_pink, llama_blue, llama_turquoise, llama_lavender,
red, green, yellow, blue, magenta, cyan, pink.
end (str, optional): String appended after the last character of the text.
Returns:
None
"""
text_to_print = _get_colored_text(text, color) if color is not None else text
print(text_to_print, end=end)
def infer_torch_device() -> str:
"""Infer the input to torch.device."""
try:
has_cuda = torch.cuda.is_available()
except NameError:
import torch
has_cuda = torch.cuda.is_available()
if has_cuda:
return "cuda"
if torch.backends.mps.is_available():
return "mps"
return "cpu"
def unit_generator(x: Any) -> Generator[Any, None, None]:
"""A function that returns a generator of a single element.
Args:
x (Any): the element to build yield
Yields:
Any: the single element
"""
yield x
async def async_unit_generator(x: Any) -> AsyncGenerator[Any, None]:
"""A function that returns a generator of a single element.
Args:
x (Any): the element to build yield
Yields:
Any: the single element
"""
yield x
| [
"llama_index.utils.get_cache_dir"
] | [((7192, 7223), 'os.path.join', 'os.path.join', (['dirname', 'basename'], {}), '(dirname, basename)\n', (7204, 7223), False, 'import os\n'), ((8174, 8215), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {}), '(model_name)\n', (8203, 8215), False, 'from transformers import AutoTokenizer\n'), ((9451, 9484), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['func'], {}), '(func)\n', (9478, 9484), False, 'import asyncio\n'), ((9491, 9502), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (9496, 9502), False, 'from functools import partial, wraps\n'), ((13466, 13499), 'torch.backends.mps.is_available', 'torch.backends.mps.is_available', ([], {}), '()\n', (13497, 13499), False, 'import torch\n'), ((3225, 3269), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-3.5-turbo"""'], {}), "('gpt-3.5-turbo')\n", (3252, 3269), False, 'import tiktoken\n'), ((3290, 3332), 'functools.partial', 'partial', (['enc.encode'], {'allowed_special': '"""all"""'}), "(enc.encode, allowed_special='all')\n", (3297, 3332), False, 'from functools import partial, wraps\n'), ((3740, 3770), 'random.randint', 'random.randint', (['(0)', 'sys.maxsize'], {}), '(0, sys.maxsize)\n', (3754, 3770), False, 'import random\n'), ((8478, 8519), 'pathlib.Path', 'Path', (["os.environ['LLAMA_INDEX_CACHE_DIR']"], {}), "(os.environ['LLAMA_INDEX_CACHE_DIR'])\n", (8482, 8519), False, 'from pathlib import Path\n'), ((8981, 9001), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8995, 9001), False, 'import os\n'), ((9011, 9043), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (9022, 9043), False, 'import os\n'), ((13305, 13330), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13328, 13330), False, 'import torch\n'), ((1200, 1229), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['"""gpt2"""'], {}), "('gpt2')\n", (1221, 1229), False, 'import tiktoken\n'), ((1260, 1299), 'typing.cast', 'cast', (['Callable[[str], List]', 'enc.encode'], {}), '(Callable[[str], List], enc.encode)\n', (1264, 1299), False, 'from typing import Any, AsyncGenerator, Callable, Dict, Generator, Iterable, List, Optional, Protocol, Set, Type, Union, cast, runtime_checkable\n'), ((1330, 1377), 'functools.partial', 'partial', (['self._tokenizer'], {'allowed_special': '"""all"""'}), "(self._tokenizer, allowed_special='all')\n", (1337, 1377), False, 'from functools import partial, wraps\n'), ((1883, 1898), 'llama_index.utils.get_cache_dir', 'get_cache_dir', ([], {}), '()\n', (1896, 1898), False, 'from llama_index.utils import get_cache_dir\n'), ((1927, 1965), 'os.environ.get', 'os.environ.get', (['"""NLTK_DATA"""', 'cache_dir'], {}), "('NLTK_DATA', cache_dir)\n", (1941, 1965), False, 'import os\n'), ((2341, 2367), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2356, 2367), False, 'from nltk.corpus import stopwords\n'), ((3560, 3572), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3570, 3572), False, 'import uuid\n'), ((6757, 6782), 'itertools.islice', 'islice', (['source_iter', 'size'], {}), '(source_iter, size)\n', (6763, 6782), False, 'from itertools import islice\n'), ((7505, 7527), 'tqdm.auto.tqdm', 'tqdm', (['items'], {'desc': 'desc'}), '(items, desc=desc)\n', (7509, 7527), False, 'from tqdm.auto import tqdm\n'), ((8623, 8647), 'pathlib.Path', 'Path', (['"""/tmp/llama_index"""'], {}), "('/tmp/llama_index')\n", (8627, 8647), False, 'from pathlib import 
Path\n'), ((13394, 13419), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13417, 13419), False, 'import torch\n'), ((2101, 2137), 'nltk.data.path.append', 'nltk.data.path.append', (['nltk_data_dir'], {}), '(nltk_data_dir)\n', (2122, 2137), False, 'import nltk\n'), ((2172, 2207), 'nltk.data.find', 'nltk.data.find', (['"""corpora/stopwords"""'], {}), "('corpora/stopwords')\n", (2186, 2207), False, 'import nltk\n'), ((5966, 5987), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5985, 5987), False, 'import traceback\n'), ((6200, 6224), 'time.sleep', 'time.sleep', (['backoff_secs'], {}), '(backoff_secs)\n', (6210, 6224), False, 'import time\n'), ((8942, 8968), 'pathlib.Path', 'Path', (['local', '"""llama_index"""'], {}), "(local, 'llama_index')\n", (8946, 8968), False, 'from pathlib import Path\n'), ((9568, 9592), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (9590, 9592), False, 'import asyncio\n'), ((2256, 2310), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {'download_dir': 'nltk_data_dir'}), "('stopwords', download_dir=nltk_data_dir)\n", (2269, 2310), False, 'import nltk\n'), ((8717, 8740), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (8735, 8740), False, 'import os\n'), ((8825, 8861), 'os.environ.get', 'os.environ.get', (['"""LOCALAPPDATA"""', 'None'], {}), "('LOCALAPPDATA', None)\n", (8839, 8861), False, 'import os\n'), ((8865, 8904), 'os.path.expanduser', 'os.path.expanduser', (['"""~\\\\AppData\\\\Local"""'], {}), "('~\\\\AppData\\\\Local')\n", (8883, 8904), False, 'import os\n')] |
"""Global eval handlers."""
from typing import Any
from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler
from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index
llama_index.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
| [
"llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler"
] | [((917, 952), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (937, 952), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1010, 1053), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1038, 1053), False, 'from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1111, 1156), 'llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1141, 1156), False, 'from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1210, 1251), 'llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1236, 1251), False, 'from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1302, 1333), 'llama_index.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1318, 1333), False, 'from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler\n')] |
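# A minimal, hypothetical sketch of activating one of the global callback
# handlers defined above. It assumes the module is importable as
# `llama_index.callbacks.global_handlers` (the upstream package layout). The
# "simple" mode is shown because it needs no extra parameters; the other modes
# forward **eval_params to their respective handler constructors.
from llama_index.callbacks.global_handlers import set_global_handler

# Routes subsequent LlamaIndex LLM calls through SimpleLLMHandler, which
# prints prompts and responses as they happen.
set_global_handler("simple")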
"""Google Generative AI Vector Store.
The GenAI Semantic Retriever API is a managed end-to-end service that allows
developers to create a corpus of documents to perform semantic search on
related passages given a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast
from llama_index.bridge.pydantic import BaseModel, Field, PrivateAttr # type: ignore
from llama_index.indices.service_context import ServiceContext
from llama_index.schema import BaseNode, RelatedNodeInfo, TextNode
from llama_index.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryResult,
)
if TYPE_CHECKING:
from google.auth import credentials
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_default_doc_id = "default-doc"
google_service_context = ServiceContext.from_defaults(
# Avoids instantiating OpenAI as the default model.
llm=None,
# Avoids instantiating HuggingFace as the default model.
embed_model=None,
)
"""Google GenerativeAI service context.
Use this to provide the correct service context for `GoogleVectorStore`.
See the docstring for `GoogleVectorStore` for usage example.
"""
def set_google_config(
*,
api_endpoint: Optional[str] = None,
user_agent: Optional[str] = None,
page_size: Optional[int] = None,
auth_credentials: Optional["credentials.Credentials"] = None,
**kwargs: Any,
) -> None:
"""
Set the configuration for Google Generative AI API.
    Parameters are optional; normally, the defaults should work fine.
If provided, they will override the default values in the Config class.
See the docstring in `genai_extension.py` for more details.
auth_credentials: Optional["credentials.Credentials"] = None,
Use this to pass Google Auth credentials such as using a service account.
    Refer to the auth credentials documentation:
https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount.
Example:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
"/path/to/service.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
config_attrs = {
"api_endpoint": api_endpoint,
"user_agent": user_agent,
"page_size": page_size,
"auth_credentials": auth_credentials,
"testing": kwargs.get("testing", None),
}
attrs = {k: v for k, v in config_attrs.items() if v is not None}
config = genaix.Config(**attrs)
genaix.set_config(config)
class NoSuchCorpusException(Exception):
def __init__(self, *, corpus_id: str) -> None:
super().__init__(f"No such corpus {corpus_id} found")
class GoogleVectorStore(BasePydanticVectorStore):
"""Google GenerativeAI Vector Store.
Currently, it computes the embedding vectors on the server side.
Example:
google_vector_store = GoogleVectorStore.from_corpus(
corpus_id="my-corpus-id")
index = VectorStoreIndex.from_vector_store(
google_vector_store,
service_context=google_service_context)
Attributes:
corpus_id: The corpus ID that this vector store instance will read and
write to.
"""
# Semantic Retriever stores the document node's text as string and embeds
# the vectors on the server automatically.
stores_text: bool = True
is_embedding_query: bool = False
    # This is not Google's corpus name but an ID generated in the LlamaIndex
    # world.
corpus_id: str = Field(frozen=True)
"""Corpus ID that this instance of the vector store is using."""
_client: Any = PrivateAttr()
def __init__(self, *, client: Any, **kwargs: Any):
"""Raw constructor.
Use the class method `from_corpus` or `create_corpus` instead.
Args:
client: The low-level retriever class from google.ai.generativelanguage.
"""
try:
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(**kwargs)
assert isinstance(client, genai.RetrieverServiceClient)
self._client = client
@classmethod
def from_corpus(cls, *, corpus_id: str) -> "GoogleVectorStore":
"""Create an instance that points to an existing corpus.
Args:
corpus_id: ID of an existing corpus on Google's server.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
NoSuchCorpusException if no such corpus is found.
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.from_corpus(corpus_id={corpus_id})")
client = genaix.build_semantic_retriever()
if genaix.get_corpus(corpus_id=corpus_id, client=client) is None:
raise NoSuchCorpusException(corpus_id=corpus_id)
return cls(corpus_id=corpus_id, client=client)
@classmethod
def create_corpus(
cls, *, corpus_id: Optional[str] = None, display_name: Optional[str] = None
) -> "GoogleVectorStore":
"""Create an instance that points to a newly created corpus.
Examples:
store = GoogleVectorStore.create_corpus()
            print(f"Created corpus with ID: {store.corpus_id}")
store = GoogleVectorStore.create_corpus(
display_name="My first corpus"
)
store = GoogleVectorStore.create_corpus(
corpus_id="my-corpus-1",
display_name="My first corpus"
)
Args:
corpus_id: ID of the new corpus to be created. If not provided,
Google server will provide one for you.
display_name: Title of the corpus. If not provided, Google server
will provide one for you.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
An exception if the corpus already exists or the user hits the
quota limit.
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(
f"\n\nGoogleVectorStore.create_corpus(new_corpus_id={corpus_id}, new_display_name={display_name})"
)
client = genaix.build_semantic_retriever()
new_corpus_id = corpus_id or str(uuid.uuid4())
new_corpus = genaix.create_corpus(
corpus_id=new_corpus_id, display_name=display_name, client=client
)
name = genaix.EntityName.from_str(new_corpus.name)
return cls(corpus_id=name.corpus_id, client=client)
@classmethod
def class_name(cls) -> str:
return "GoogleVectorStore"
@property
def client(self) -> Any:
return self._client
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""Add nodes with embedding to vector store.
If a node has a source node, the source node's ID will be used to create
a document. Otherwise, a default document for that corpus will be used
to house the node.
Furthermore, if the source node has a metadata field "file_name", it
will be used as the title of the document. If the source node has no
such field, Google server will assign a title to the document.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.add([
TextNode(
text="Hello, my darling",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
TextNode(
text="Goodbye, my baby",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
])
The above code will create one document with ID `doc-456` and title
`Title for doc-456`. This document will house both nodes.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.add(nodes={nodes})")
client = cast(genai.RetrieverServiceClient, self.client)
created_node_ids: List[str] = []
for nodeGroup in _group_nodes_by_source(nodes):
source = nodeGroup.source_node
document_id = source.node_id
document = genaix.get_document(
corpus_id=self.corpus_id, document_id=document_id, client=client
)
if not document:
genaix.create_document(
corpus_id=self.corpus_id,
display_name=source.metadata.get("file_name", None),
document_id=document_id,
metadata=source.metadata,
client=client,
)
created_chunks = genaix.batch_create_chunk(
corpus_id=self.corpus_id,
document_id=document_id,
texts=[node.get_content() for node in nodeGroup.nodes],
metadatas=[node.metadata for node in nodeGroup.nodes],
client=client,
)
created_node_ids.extend([chunk.name for chunk in created_chunks])
return created_node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes by ref_doc_id.
Both the underlying nodes and the document will be deleted from Google
server.
Args:
ref_doc_id: The document ID to be deleted.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.delete(ref_doc_id={ref_doc_id})")
client = cast(genai.RetrieverServiceClient, self.client)
genaix.delete_document(
corpus_id=self.corpus_id, document_id=ref_doc_id, client=client
)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.query(
query=VectorStoreQuery(
query_str="What is the meaning of life?",
# Only nodes with this author.
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="author",
value="Arthur Schopenhauer",
)
]
),
# Only from these docs. If not provided,
# the entire corpus is searched.
doc_ids=["doc-456"],
similarity_top_k=3,
)
)
Args:
query: See `llama_index.vector_stores.types.VectorStoreQuery`.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.query(query={query})")
query_str = query.query_str
if query_str is None:
raise ValueError("VectorStoreQuery.query_str should not be None.")
client = cast(genai.RetrieverServiceClient, self.client)
relevant_chunks: List[genai.RelevantChunk] = []
if query.doc_ids is None:
            # The chunks returned by query_corpus should already be sorted in
            # descending order of relevance score.
relevant_chunks = genaix.query_corpus(
corpus_id=self.corpus_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
else:
for doc_id in query.doc_ids:
relevant_chunks.extend(
genaix.query_document(
corpus_id=self.corpus_id,
document_id=doc_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
)
        # Make sure the chunks are sorted in descending order of relevance
        # score, even across multiple documents.
relevant_chunks.sort(key=lambda c: c.chunk_relevance_score, reverse=True)
return VectorStoreQueryResult(
nodes=[
TextNode(
text=chunk.chunk.data.string_value,
id_=_extract_chunk_id(chunk.chunk.name),
)
for chunk in relevant_chunks
],
ids=[_extract_chunk_id(chunk.chunk.name) for chunk in relevant_chunks],
similarities=[chunk.chunk_relevance_score for chunk in relevant_chunks],
)
def _extract_chunk_id(entity_name: str) -> str:
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
id = genaix.EntityName.from_str(entity_name).chunk_id
assert id is not None
return id
class _NodeGroup(BaseModel):
"""Every node in nodes have the same source node."""
source_node: RelatedNodeInfo
nodes: List[BaseNode]
def _group_nodes_by_source(nodes: Sequence[BaseNode]) -> List[_NodeGroup]:
"""Returns a list of lists of nodes where each list has all the nodes
from the same document.
"""
groups: Dict[str, _NodeGroup] = {}
for node in nodes:
source_node: RelatedNodeInfo
if isinstance(node.source_node, RelatedNodeInfo):
source_node = node.source_node
else:
source_node = RelatedNodeInfo(node_id=_default_doc_id)
if source_node.node_id not in groups:
groups[source_node.node_id] = _NodeGroup(source_node=source_node, nodes=[])
groups[source_node.node_id].nodes.append(node)
return list(groups.values())
def _convert_filter(fs: Optional[MetadataFilters]) -> Dict[str, Any]:
if fs is None:
return {}
assert isinstance(fs, MetadataFilters)
return {f.key: f.value for f in fs.filters}
| [
"llama_index.vector_stores.google.generativeai.genai_extension.EntityName.from_str",
"llama_index.vector_stores.google.generativeai.genai_extension.get_corpus",
"llama_index.vector_stores.google.generativeai.genai_extension.build_semantic_retriever",
"llama_index.bridge.pydantic.Field",
"llama_index.vector_stores.google.generativeai.genai_extension.delete_document",
"llama_index.vector_stores.google.generativeai.genai_extension.create_corpus",
"llama_index.bridge.pydantic.PrivateAttr",
"llama_index.schema.RelatedNodeInfo",
"llama_index.vector_stores.google.generativeai.genai_extension.get_document",
"llama_index.vector_stores.google.generativeai.genai_extension.set_config",
"llama_index.indices.service_context.ServiceContext.from_defaults",
"llama_index.vector_stores.google.generativeai.genai_extension.Config"
] | [((843, 870), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (860, 870), False, 'import logging\n'), ((1036, 1092), 'llama_index.indices.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'None', 'embed_model': 'None'}), '(llm=None, embed_model=None)\n', (1064, 1092), False, 'from llama_index.indices.service_context import ServiceContext\n'), ((3113, 3135), 'llama_index.vector_stores.google.generativeai.genai_extension.Config', 'genaix.Config', ([], {}), '(**attrs)\n', (3126, 3135), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((3140, 3165), 'llama_index.vector_stores.google.generativeai.genai_extension.set_config', 'genaix.set_config', (['config'], {}), '(config)\n', (3157, 3165), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4180, 4198), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4185, 4198), False, 'from llama_index.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((4288, 4301), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (4299, 4301), False, 'from llama_index.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((5550, 5583), 'llama_index.vector_stores.google.generativeai.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (5581, 5583), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7243, 7276), 'llama_index.vector_stores.google.generativeai.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (7274, 7276), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7353, 7444), 'llama_index.vector_stores.google.generativeai.genai_extension.create_corpus', 'genaix.create_corpus', ([], {'corpus_id': 'new_corpus_id', 'display_name': 'display_name', 'client': 'client'}), '(corpus_id=new_corpus_id, display_name=display_name,\n client=client)\n', (7373, 7444), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7478, 7521), 'llama_index.vector_stores.google.generativeai.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['new_corpus.name'], {}), '(new_corpus.name)\n', (7504, 7521), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((9606, 9653), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (9610, 9653), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((11370, 11417), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (11374, 11417), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((11426, 11517), 'llama_index.vector_stores.google.generativeai.genai_extension.delete_document', 'genaix.delete_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'ref_doc_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=ref_doc_id,\n client=client)\n', (11448, 11517), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((13029, 13076), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (13033, 13076), False, 'from 
typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((14910, 14949), 'llama_index.vector_stores.google.generativeai.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['entity_name'], {}), '(entity_name)\n', (14936, 14949), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((5595, 5648), 'llama_index.vector_stores.google.generativeai.genai_extension.get_corpus', 'genaix.get_corpus', ([], {'corpus_id': 'corpus_id', 'client': 'client'}), '(corpus_id=corpus_id, client=client)\n', (5612, 5648), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((9859, 9948), 'llama_index.vector_stores.google.generativeai.genai_extension.get_document', 'genaix.get_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'document_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=document_id,\n client=client)\n', (9878, 9948), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((15574, 15614), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '_default_doc_id'}), '(node_id=_default_doc_id)\n', (15589, 15614), False, 'from llama_index.schema import BaseNode, RelatedNodeInfo, TextNode\n'), ((7318, 7330), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7328, 7330), False, 'import uuid\n')] |
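# A minimal, hypothetical end-to-end sketch for the vector store defined above,
# assembled from its own docstring examples. It assumes GoogleVectorStore and
# google_service_context are exposed under
# `llama_index.vector_stores.google.generativeai`, that Google API credentials
# are already configured (e.g. via set_google_config), and that a corpus with
# ID "my-corpus-id" already exists server-side; the IDs and query text are
# placeholders.
from llama_index import VectorStoreIndex
from llama_index.vector_stores.google.generativeai import (
    GoogleVectorStore,
    google_service_context,
)

store = GoogleVectorStore.from_corpus(corpus_id="my-corpus-id")
index = VectorStoreIndex.from_vector_store(
    store,
    service_context=google_service_context,
)

# Embedding and ranking happen on Google's side (is_embedding_query=False), so
# a plain retriever works without a local LLM or embedding model.
retriever = index.as_retriever()
for node_with_score in retriever.retrieve("What is the meaning of life?"):
    print(node_with_score.score, node_with_score.node.get_content())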
"""Google Generative AI Vector Store.
The GenAI Semantic Retriever API is a managed end-to-end service that allows
developers to create a corpus of documents to perform semantic search on
related passages given a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast
from llama_index.bridge.pydantic import BaseModel, Field, PrivateAttr # type: ignore
from llama_index.indices.service_context import ServiceContext
from llama_index.schema import BaseNode, RelatedNodeInfo, TextNode
from llama_index.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryResult,
)
if TYPE_CHECKING:
from google.auth import credentials
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_default_doc_id = "default-doc"
google_service_context = ServiceContext.from_defaults(
# Avoids instantiating OpenAI as the default model.
llm=None,
# Avoids instantiating HuggingFace as the default model.
embed_model=None,
)
"""Google GenerativeAI service context.
Use this to provide the correct service context for `GoogleVectorStore`.
See the docstring for `GoogleVectorStore` for usage example.
"""
def set_google_config(
*,
api_endpoint: Optional[str] = None,
user_agent: Optional[str] = None,
page_size: Optional[int] = None,
auth_credentials: Optional["credentials.Credentials"] = None,
**kwargs: Any,
) -> None:
"""
Set the configuration for Google Generative AI API.
    Parameters are optional. Normally, the defaults should work fine.
If provided, they will override the default values in the Config class.
See the docstring in `genai_extension.py` for more details.
auth_credentials: Optional["credentials.Credentials"] = None,
Use this to pass Google Auth credentials such as using a service account.
    Refer to the auth credentials documentation:
https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount.
Example:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
"/path/to/service.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
config_attrs = {
"api_endpoint": api_endpoint,
"user_agent": user_agent,
"page_size": page_size,
"auth_credentials": auth_credentials,
"testing": kwargs.get("testing", None),
}
attrs = {k: v for k, v in config_attrs.items() if v is not None}
config = genaix.Config(**attrs)
genaix.set_config(config)
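# --- Editorial usage sketch (not part of the original module) ---
# A minimal, hedged example of overriding only the transport settings through
# `set_google_config`. The endpoint and page size are illustrative values, not
# required defaults; credential handling is shown in the docstring above.
def _example_set_google_config() -> None:
    set_google_config(
        api_endpoint="generativelanguage.googleapis.com",
        page_size=20,
    )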
class NoSuchCorpusException(Exception):
def __init__(self, *, corpus_id: str) -> None:
super().__init__(f"No such corpus {corpus_id} found")
class GoogleVectorStore(BasePydanticVectorStore):
"""Google GenerativeAI Vector Store.
Currently, it computes the embedding vectors on the server side.
Example:
google_vector_store = GoogleVectorStore.from_corpus(
corpus_id="my-corpus-id")
index = VectorStoreIndex.from_vector_store(
google_vector_store,
service_context=google_service_context)
Attributes:
corpus_id: The corpus ID that this vector store instance will read and
write to.
"""
# Semantic Retriever stores the document node's text as string and embeds
# the vectors on the server automatically.
stores_text: bool = True
is_embedding_query: bool = False
    # This is not Google's corpus name but an ID generated in the LlamaIndex
# world.
corpus_id: str = Field(frozen=True)
"""Corpus ID that this instance of the vector store is using."""
_client: Any = PrivateAttr()
def __init__(self, *, client: Any, **kwargs: Any):
"""Raw constructor.
Use the class method `from_corpus` or `create_corpus` instead.
Args:
            client: The low-level retriever client from google.ai.generativelanguage.
"""
try:
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(**kwargs)
assert isinstance(client, genai.RetrieverServiceClient)
self._client = client
@classmethod
def from_corpus(cls, *, corpus_id: str) -> "GoogleVectorStore":
"""Create an instance that points to an existing corpus.
Args:
corpus_id: ID of an existing corpus on Google's server.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
NoSuchCorpusException if no such corpus is found.
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.from_corpus(corpus_id={corpus_id})")
client = genaix.build_semantic_retriever()
if genaix.get_corpus(corpus_id=corpus_id, client=client) is None:
raise NoSuchCorpusException(corpus_id=corpus_id)
return cls(corpus_id=corpus_id, client=client)
@classmethod
def create_corpus(
cls, *, corpus_id: Optional[str] = None, display_name: Optional[str] = None
) -> "GoogleVectorStore":
"""Create an instance that points to a newly created corpus.
Examples:
store = GoogleVectorStore.create_corpus()
            print(f"Created corpus with ID: {store.corpus_id}")
store = GoogleVectorStore.create_corpus(
display_name="My first corpus"
)
store = GoogleVectorStore.create_corpus(
corpus_id="my-corpus-1",
display_name="My first corpus"
)
Args:
corpus_id: ID of the new corpus to be created. If not provided,
Google server will provide one for you.
display_name: Title of the corpus. If not provided, Google server
will provide one for you.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
An exception if the corpus already exists or the user hits the
quota limit.
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(
f"\n\nGoogleVectorStore.create_corpus(new_corpus_id={corpus_id}, new_display_name={display_name})"
)
client = genaix.build_semantic_retriever()
new_corpus_id = corpus_id or str(uuid.uuid4())
new_corpus = genaix.create_corpus(
corpus_id=new_corpus_id, display_name=display_name, client=client
)
name = genaix.EntityName.from_str(new_corpus.name)
return cls(corpus_id=name.corpus_id, client=client)
@classmethod
def class_name(cls) -> str:
return "GoogleVectorStore"
@property
def client(self) -> Any:
return self._client
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""Add nodes with embedding to vector store.
If a node has a source node, the source node's ID will be used to create
a document. Otherwise, a default document for that corpus will be used
to house the node.
Furthermore, if the source node has a metadata field "file_name", it
will be used as the title of the document. If the source node has no
such field, Google server will assign a title to the document.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.add([
TextNode(
text="Hello, my darling",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
TextNode(
text="Goodbye, my baby",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
])
The above code will create one document with ID `doc-456` and title
`Title for doc-456`. This document will house both nodes.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.add(nodes={nodes})")
client = cast(genai.RetrieverServiceClient, self.client)
created_node_ids: List[str] = []
for nodeGroup in _group_nodes_by_source(nodes):
source = nodeGroup.source_node
document_id = source.node_id
document = genaix.get_document(
corpus_id=self.corpus_id, document_id=document_id, client=client
)
if not document:
genaix.create_document(
corpus_id=self.corpus_id,
display_name=source.metadata.get("file_name", None),
document_id=document_id,
metadata=source.metadata,
client=client,
)
created_chunks = genaix.batch_create_chunk(
corpus_id=self.corpus_id,
document_id=document_id,
texts=[node.get_content() for node in nodeGroup.nodes],
metadatas=[node.metadata for node in nodeGroup.nodes],
client=client,
)
created_node_ids.extend([chunk.name for chunk in created_chunks])
return created_node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes by ref_doc_id.
Both the underlying nodes and the document will be deleted from Google
server.
Args:
ref_doc_id: The document ID to be deleted.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.delete(ref_doc_id={ref_doc_id})")
client = cast(genai.RetrieverServiceClient, self.client)
genaix.delete_document(
corpus_id=self.corpus_id, document_id=ref_doc_id, client=client
)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.query(
query=VectorStoreQuery(
query_str="What is the meaning of life?",
# Only nodes with this author.
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="author",
value="Arthur Schopenhauer",
)
]
),
# Only from these docs. If not provided,
# the entire corpus is searched.
doc_ids=["doc-456"],
similarity_top_k=3,
)
)
Args:
query: See `llama_index.vector_stores.types.VectorStoreQuery`.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.query(query={query})")
query_str = query.query_str
if query_str is None:
raise ValueError("VectorStoreQuery.query_str should not be None.")
client = cast(genai.RetrieverServiceClient, self.client)
relevant_chunks: List[genai.RelevantChunk] = []
if query.doc_ids is None:
            # The chunks returned by query_corpus should already be sorted in
            # descending order of relevance score.
relevant_chunks = genaix.query_corpus(
corpus_id=self.corpus_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
else:
for doc_id in query.doc_ids:
relevant_chunks.extend(
genaix.query_document(
corpus_id=self.corpus_id,
document_id=doc_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
)
        # Make sure the chunks are sorted in descending order of relevance
        # score, even across multiple documents.
relevant_chunks.sort(key=lambda c: c.chunk_relevance_score, reverse=True)
return VectorStoreQueryResult(
nodes=[
TextNode(
text=chunk.chunk.data.string_value,
id_=_extract_chunk_id(chunk.chunk.name),
)
for chunk in relevant_chunks
],
ids=[_extract_chunk_id(chunk.chunk.name) for chunk in relevant_chunks],
similarities=[chunk.chunk_relevance_score for chunk in relevant_chunks],
)
def _extract_chunk_id(entity_name: str) -> str:
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
id = genaix.EntityName.from_str(entity_name).chunk_id
assert id is not None
return id
class _NodeGroup(BaseModel):
    """Every node in nodes has the same source node."""
source_node: RelatedNodeInfo
nodes: List[BaseNode]
def _group_nodes_by_source(nodes: Sequence[BaseNode]) -> List[_NodeGroup]:
    """Returns a list of node groups where each group holds all the nodes
from the same document.
"""
groups: Dict[str, _NodeGroup] = {}
for node in nodes:
source_node: RelatedNodeInfo
if isinstance(node.source_node, RelatedNodeInfo):
source_node = node.source_node
else:
source_node = RelatedNodeInfo(node_id=_default_doc_id)
if source_node.node_id not in groups:
groups[source_node.node_id] = _NodeGroup(source_node=source_node, nodes=[])
groups[source_node.node_id].nodes.append(node)
return list(groups.values())
def _convert_filter(fs: Optional[MetadataFilters]) -> Dict[str, Any]:
if fs is None:
return {}
assert isinstance(fs, MetadataFilters)
return {f.key: f.value for f in fs.filters}
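# --- Editorial usage sketch (not part of the original module) ---
# End-to-end flow assuming `google-generativeai` is installed, Google auth is
# already configured (see `set_google_config`), and the corpus ID below is a
# hypothetical placeholder. It mirrors the docstrings above: open the store,
# add a node, wrap it in an index, then run a raw vector-store query.
def _example_end_to_end() -> None:
    from llama_index import VectorStoreIndex
    store = GoogleVectorStore.from_corpus(corpus_id="my-corpus-id")
    store.add([TextNode(text="Hello, my darling")])
    index = VectorStoreIndex.from_vector_store(
        store, service_context=google_service_context
    )
    print(index)
    result = store.query(
        VectorStoreQuery(query_str="What is the meaning of life?", similarity_top_k=3)
    )
    for node, score in zip(result.nodes, result.similarities):
        print(score, node.text)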
| [
"llama_index.vector_stores.google.generativeai.genai_extension.EntityName.from_str",
"llama_index.vector_stores.google.generativeai.genai_extension.get_corpus",
"llama_index.vector_stores.google.generativeai.genai_extension.build_semantic_retriever",
"llama_index.bridge.pydantic.Field",
"llama_index.vector_stores.google.generativeai.genai_extension.delete_document",
"llama_index.vector_stores.google.generativeai.genai_extension.create_corpus",
"llama_index.bridge.pydantic.PrivateAttr",
"llama_index.schema.RelatedNodeInfo",
"llama_index.vector_stores.google.generativeai.genai_extension.get_document",
"llama_index.vector_stores.google.generativeai.genai_extension.set_config",
"llama_index.indices.service_context.ServiceContext.from_defaults",
"llama_index.vector_stores.google.generativeai.genai_extension.Config"
] | [((843, 870), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (860, 870), False, 'import logging\n'), ((1036, 1092), 'llama_index.indices.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'None', 'embed_model': 'None'}), '(llm=None, embed_model=None)\n', (1064, 1092), False, 'from llama_index.indices.service_context import ServiceContext\n'), ((3113, 3135), 'llama_index.vector_stores.google.generativeai.genai_extension.Config', 'genaix.Config', ([], {}), '(**attrs)\n', (3126, 3135), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((3140, 3165), 'llama_index.vector_stores.google.generativeai.genai_extension.set_config', 'genaix.set_config', (['config'], {}), '(config)\n', (3157, 3165), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4180, 4198), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4185, 4198), False, 'from llama_index.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((4288, 4301), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (4299, 4301), False, 'from llama_index.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((5550, 5583), 'llama_index.vector_stores.google.generativeai.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (5581, 5583), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7243, 7276), 'llama_index.vector_stores.google.generativeai.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (7274, 7276), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7353, 7444), 'llama_index.vector_stores.google.generativeai.genai_extension.create_corpus', 'genaix.create_corpus', ([], {'corpus_id': 'new_corpus_id', 'display_name': 'display_name', 'client': 'client'}), '(corpus_id=new_corpus_id, display_name=display_name,\n client=client)\n', (7373, 7444), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7478, 7521), 'llama_index.vector_stores.google.generativeai.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['new_corpus.name'], {}), '(new_corpus.name)\n', (7504, 7521), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((9606, 9653), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (9610, 9653), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((11370, 11417), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (11374, 11417), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((11426, 11517), 'llama_index.vector_stores.google.generativeai.genai_extension.delete_document', 'genaix.delete_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'ref_doc_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=ref_doc_id,\n client=client)\n', (11448, 11517), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((13029, 13076), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (13033, 13076), False, 'from 
typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((14910, 14949), 'llama_index.vector_stores.google.generativeai.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['entity_name'], {}), '(entity_name)\n', (14936, 14949), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((5595, 5648), 'llama_index.vector_stores.google.generativeai.genai_extension.get_corpus', 'genaix.get_corpus', ([], {'corpus_id': 'corpus_id', 'client': 'client'}), '(corpus_id=corpus_id, client=client)\n', (5612, 5648), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((9859, 9948), 'llama_index.vector_stores.google.generativeai.genai_extension.get_document', 'genaix.get_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'document_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=document_id,\n client=client)\n', (9878, 9948), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((15574, 15614), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '_default_doc_id'}), '(node_id=_default_doc_id)\n', (15589, 15614), False, 'from llama_index.schema import BaseNode, RelatedNodeInfo, TextNode\n'), ((7318, 7330), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7328, 7330), False, 'import uuid\n')] |
import dataclasses
import logging
from dataclasses import dataclass
from typing import Optional
from langchain.base_language import BaseLanguageModel
import llama_index
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser
from llama_index.node_parser.simple import SimpleNodeParser
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SimpleNodeParser.from_defaults(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata = dataclasses.replace(llm_metadata, context_window=context_window)
if num_output is not None:
llm_metadata = dataclasses.replace(llm_metadata, num_output=num_output)
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
node_parser: NodeParser
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[BaseLanguageModel] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[BaseEmbedding] = None,
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
llama_logger=llama_logger,
callback_manager=callback_manager,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
)
callback_manager = callback_manager or CallbackManager([])
if llm is not None:
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or LLMPredictor()
llm_predictor.callback_manager = callback_manager
# NOTE: the embed_model isn't used in all indices
embed_model = embed_model or OpenAIEmbedding()
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.get_llm_metadata(),
context_window=context_window,
num_output=num_output,
)
node_parser = node_parser or _get_default_node_parser(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[BaseLanguageModel] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[BaseEmbedding] = None,
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
if chunk_size_limit is not None and chunk_size is None:
            logger.warning(
                "chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm is not None:
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
llm_predictor.callback_manager = callback_manager
# NOTE: the embed_model isn't used in all indices
embed_model = embed_model or service_context.embed_model
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.get_llm_metadata(),
context_window=context_window,
num_output=num_output,
)
node_parser = node_parser or service_context.node_parser
if chunk_size is not None or chunk_overlap is not None:
node_parser = _get_default_node_parser(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
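# --- Editorial usage sketch (not part of the original module) ---
# A hedged example of building a ServiceContext, deriving a variant from it,
# and installing the result globally so later `from_defaults` calls inherit
# it. The model name and chunk sizes are illustrative, and an OpenAI API key
# is assumed to be available in the environment.
def _example_global_service_context() -> None:
    from langchain.chat_models import ChatOpenAI
    base = ServiceContext.from_defaults(
        llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0),
        chunk_size=512,
    )
    smaller = ServiceContext.from_service_context(base, chunk_size=256)
    set_global_service_context(smaller)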
| [
"llama_index.langchain_helpers.chain_wrapper.LLMPredictor",
"llama_index.callbacks.base.CallbackManager",
"llama_index.node_parser.simple.SimpleNodeParser.from_defaults",
"llama_index.logger.LlamaLogger",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((709, 736), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (726, 736), False, 'import logging\n'), ((967, 1089), 'llama_index.node_parser.simple.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'callback_manager': 'callback_manager'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap, callback_manager=callback_manager)\n', (997, 1089), False, 'from llama_index.node_parser.simple import SimpleNodeParser\n'), ((1562, 1619), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1592, 1619), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((1375, 1439), 'dataclasses.replace', 'dataclasses.replace', (['llm_metadata'], {'context_window': 'context_window'}), '(llm_metadata, context_window=context_window)\n', (1394, 1439), False, 'import dataclasses\n'), ((1494, 1550), 'dataclasses.replace', 'dataclasses.replace', (['llm_metadata'], {'num_output': 'num_output'}), '(llm_metadata, num_output=num_output)\n', (1513, 1550), False, 'import dataclasses\n'), ((4762, 4781), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (4777, 4781), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((4958, 4979), 'llama_index.langchain_helpers.chain_wrapper.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (4970, 4979), False, 'from llama_index.langchain_helpers.chain_wrapper import LLMPredictor\n'), ((5021, 5035), 'llama_index.langchain_helpers.chain_wrapper.LLMPredictor', 'LLMPredictor', ([], {}), '()\n', (5033, 5035), False, 'from llama_index.langchain_helpers.chain_wrapper import LLMPredictor\n'), ((5190, 5207), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (5205, 5207), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((5718, 5731), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (5729, 5731), False, 'from llama_index.logger import LlamaLogger\n'), ((7437, 7458), 'llama_index.langchain_helpers.chain_wrapper.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (7449, 7458), False, 'from llama_index.langchain_helpers.chain_wrapper import LLMPredictor\n')] |
from typing import Any
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
from llama_index.core.callbacks.simple_llm_handler import SimpleLLMHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index.core
llama_index.core.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
    """Create a global eval handler for the given mode."""
if eval_mode == "wandb":
try:
from llama_index.callbacks.wandb import (
WandbCallbackHandler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"WandbCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-wandb`"
)
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
try:
from llama_index.callbacks.openinference import (
OpenInferenceCallbackHandler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"OpenInferenceCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-openinference`"
)
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
try:
from llama_index.callbacks.arize_phoenix import (
arize_phoenix_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"ArizePhoenixCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-arize-phoenix`"
)
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
try:
from llama_index.callbacks.honeyhive import (
honeyhive_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"HoneyHiveCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-honeyhive`"
)
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
try:
from llama_index.callbacks.promptlayer import (
PromptLayerHandler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"PromptLayerHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-promptlayer`"
)
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
try:
from llama_index.callbacks.deepeval import (
deepeval_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"DeepEvalCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-deepeval`"
)
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
elif eval_mode == "argilla":
try:
from llama_index.callbacks.argilla import (
argilla_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"ArgillaCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-argilla`"
)
handler = argilla_callback_handler(**eval_params)
elif eval_mode == "langfuse":
try:
from llama_index.callbacks.langfuse import (
langfuse_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"LangfuseCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-langfuse`"
)
handler = langfuse_callback_handler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
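# --- Editorial usage sketch (not part of the original module) ---
# Enabling the built-in "simple" handler globally; the other modes forward
# **eval_params to their respective handler constructors, so pass whatever
# keyword arguments that integration expects.
def _example_enable_simple_handler() -> None:
    set_global_handler("simple")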
| [
"llama_index.callbacks.openinference.OpenInferenceCallbackHandler",
"llama_index.callbacks.promptlayer.PromptLayerHandler",
"llama_index.callbacks.deepeval.deepeval_callback_handler",
"llama_index.callbacks.arize_phoenix.arize_phoenix_callback_handler",
"llama_index.callbacks.wandb.WandbCallbackHandler",
"llama_index.callbacks.honeyhive.honeyhive_callback_handler",
"llama_index.core.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.argilla.argilla_callback_handler",
"llama_index.callbacks.langfuse.langfuse_callback_handler"
] | [((941, 976), 'llama_index.callbacks.wandb.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (961, 976), False, 'from llama_index.callbacks.wandb import WandbCallbackHandler\n'), ((1424, 1467), 'llama_index.callbacks.openinference.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1452, 1467), False, 'from llama_index.callbacks.openinference import OpenInferenceCallbackHandler\n'), ((1916, 1961), 'llama_index.callbacks.arize_phoenix.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1946, 1961), False, 'from llama_index.callbacks.arize_phoenix import arize_phoenix_callback_handler\n'), ((2390, 2431), 'llama_index.callbacks.honeyhive.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (2416, 2431), False, 'from llama_index.callbacks.honeyhive import honeyhive_callback_handler\n'), ((2852, 2885), 'llama_index.callbacks.promptlayer.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (2870, 2885), False, 'from llama_index.callbacks.promptlayer import PromptLayerHandler\n'), ((3309, 3349), 'llama_index.callbacks.deepeval.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (3334, 3349), False, 'from llama_index.callbacks.deepeval import deepeval_callback_handler\n'), ((3400, 3431), 'llama_index.core.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (3416, 3431), False, 'from llama_index.core.callbacks.simple_llm_handler import SimpleLLMHandler\n'), ((3850, 3889), 'llama_index.callbacks.argilla.argilla_callback_handler', 'argilla_callback_handler', ([], {}), '(**eval_params)\n', (3874, 3889), False, 'from llama_index.callbacks.argilla import argilla_callback_handler\n'), ((4313, 4353), 'llama_index.callbacks.langfuse.langfuse_callback_handler', 'langfuse_callback_handler', ([], {}), '(**eval_params)\n', (4338, 4353), False, 'from llama_index.callbacks.langfuse import langfuse_callback_handler\n')] |
"""Google Generative AI Vector Store.
The GenAI Semantic Retriever API is a managed end-to-end service that allows
developers to create a corpus of documents to perform semantic search on
related passages given a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast
from llama_index.legacy.bridge.pydantic import ( # type: ignore
BaseModel,
Field,
PrivateAttr,
)
from llama_index.legacy.indices.service_context import ServiceContext
from llama_index.legacy.schema import BaseNode, RelatedNodeInfo, TextNode
from llama_index.legacy.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryResult,
)
if TYPE_CHECKING:
from google.auth import credentials
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_default_doc_id = "default-doc"
google_service_context = ServiceContext.from_defaults(
# Avoids instantiating OpenAI as the default model.
llm=None,
# Avoids instantiating HuggingFace as the default model.
embed_model=None,
)
"""Google GenerativeAI service context.
Use this to provide the correct service context for `GoogleVectorStore`.
See the docstring for `GoogleVectorStore` for usage example.
"""
def set_google_config(
*,
api_endpoint: Optional[str] = None,
user_agent: Optional[str] = None,
page_size: Optional[int] = None,
auth_credentials: Optional["credentials.Credentials"] = None,
**kwargs: Any,
) -> None:
"""
Set the configuration for Google Generative AI API.
    Parameters are optional. Normally, the defaults should work fine.
If provided, they will override the default values in the Config class.
See the docstring in `genai_extension.py` for more details.
auth_credentials: Optional["credentials.Credentials"] = None,
Use this to pass Google Auth credentials such as using a service account.
    Refer to the auth credentials documentation:
https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount.
Example:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
"/path/to/service.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
config_attrs = {
"api_endpoint": api_endpoint,
"user_agent": user_agent,
"page_size": page_size,
"auth_credentials": auth_credentials,
"testing": kwargs.get("testing", None),
}
attrs = {k: v for k, v in config_attrs.items() if v is not None}
config = genaix.Config(**attrs)
genaix.set_config(config)
class NoSuchCorpusException(Exception):
def __init__(self, *, corpus_id: str) -> None:
super().__init__(f"No such corpus {corpus_id} found")
class GoogleVectorStore(BasePydanticVectorStore):
"""Google GenerativeAI Vector Store.
Currently, it computes the embedding vectors on the server side.
Example:
google_vector_store = GoogleVectorStore.from_corpus(
corpus_id="my-corpus-id")
index = VectorStoreIndex.from_vector_store(
google_vector_store,
service_context=google_service_context)
Attributes:
corpus_id: The corpus ID that this vector store instance will read and
write to.
"""
# Semantic Retriever stores the document node's text as string and embeds
# the vectors on the server automatically.
stores_text: bool = True
is_embedding_query: bool = False
    # This is not Google's corpus name but an ID generated in the LlamaIndex
    # world.
corpus_id: str = Field(frozen=True)
"""Corpus ID that this instance of the vector store is using."""
_client: Any = PrivateAttr()
def __init__(self, *, client: Any, **kwargs: Any):
"""Raw constructor.
Use the class method `from_corpus` or `create_corpus` instead.
Args:
            client: The low-level RetrieverServiceClient from google.ai.generativelanguage.
"""
try:
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(**kwargs)
assert isinstance(client, genai.RetrieverServiceClient)
self._client = client
@classmethod
def from_corpus(cls, *, corpus_id: str) -> "GoogleVectorStore":
"""Create an instance that points to an existing corpus.
Args:
corpus_id: ID of an existing corpus on Google's server.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
NoSuchCorpusException if no such corpus is found.
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.from_corpus(corpus_id={corpus_id})")
client = genaix.build_semantic_retriever()
if genaix.get_corpus(corpus_id=corpus_id, client=client) is None:
raise NoSuchCorpusException(corpus_id=corpus_id)
return cls(corpus_id=corpus_id, client=client)
@classmethod
def create_corpus(
cls, *, corpus_id: Optional[str] = None, display_name: Optional[str] = None
) -> "GoogleVectorStore":
"""Create an instance that points to a newly created corpus.
Examples:
store = GoogleVectorStore.create_corpus()
print(f"Created corpus with ID: {store.corpus_id})
store = GoogleVectorStore.create_corpus(
display_name="My first corpus"
)
store = GoogleVectorStore.create_corpus(
corpus_id="my-corpus-1",
display_name="My first corpus"
)
Args:
corpus_id: ID of the new corpus to be created. If not provided,
Google server will provide one for you.
display_name: Title of the corpus. If not provided, Google server
will provide one for you.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
An exception if the corpus already exists or the user hits the
quota limit.
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(
f"\n\nGoogleVectorStore.create_corpus(new_corpus_id={corpus_id}, new_display_name={display_name})"
)
client = genaix.build_semantic_retriever()
new_corpus_id = corpus_id or str(uuid.uuid4())
new_corpus = genaix.create_corpus(
corpus_id=new_corpus_id, display_name=display_name, client=client
)
name = genaix.EntityName.from_str(new_corpus.name)
return cls(corpus_id=name.corpus_id, client=client)
@classmethod
def class_name(cls) -> str:
return "GoogleVectorStore"
@property
def client(self) -> Any:
return self._client
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""Add nodes with embedding to vector store.
If a node has a source node, the source node's ID will be used to create
a document. Otherwise, a default document for that corpus will be used
to house the node.
Furthermore, if the source node has a metadata field "file_name", it
will be used as the title of the document. If the source node has no
such field, Google server will assign a title to the document.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.add([
TextNode(
text="Hello, my darling",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
TextNode(
text="Goodbye, my baby",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
])
The above code will create one document with ID `doc-456` and title
`Title for doc-456`. This document will house both nodes.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.add(nodes={nodes})")
client = cast(genai.RetrieverServiceClient, self.client)
created_node_ids: List[str] = []
for nodeGroup in _group_nodes_by_source(nodes):
source = nodeGroup.source_node
document_id = source.node_id
document = genaix.get_document(
corpus_id=self.corpus_id, document_id=document_id, client=client
)
if not document:
genaix.create_document(
corpus_id=self.corpus_id,
display_name=source.metadata.get("file_name", None),
document_id=document_id,
metadata=source.metadata,
client=client,
)
created_chunks = genaix.batch_create_chunk(
corpus_id=self.corpus_id,
document_id=document_id,
texts=[node.get_content() for node in nodeGroup.nodes],
metadatas=[node.metadata for node in nodeGroup.nodes],
client=client,
)
created_node_ids.extend([chunk.name for chunk in created_chunks])
return created_node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes by ref_doc_id.
Both the underlying nodes and the document will be deleted from Google
server.
Args:
ref_doc_id: The document ID to be deleted.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.delete(ref_doc_id={ref_doc_id})")
client = cast(genai.RetrieverServiceClient, self.client)
genaix.delete_document(
corpus_id=self.corpus_id, document_id=ref_doc_id, client=client
)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.query(
query=VectorStoreQuery(
query_str="What is the meaning of life?",
# Only nodes with this author.
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="author",
value="Arthur Schopenhauer",
)
]
),
# Only from these docs. If not provided,
# the entire corpus is searched.
doc_ids=["doc-456"],
similarity_top_k=3,
)
)
Args:
query: See `llama_index.vector_stores.types.VectorStoreQuery`.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.query(query={query})")
query_str = query.query_str
if query_str is None:
raise ValueError("VectorStoreQuery.query_str should not be None.")
client = cast(genai.RetrieverServiceClient, self.client)
relevant_chunks: List[genai.RelevantChunk] = []
if query.doc_ids is None:
# The chunks from query_corpus should be sorted in reverse order by
# relevant score.
relevant_chunks = genaix.query_corpus(
corpus_id=self.corpus_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
else:
for doc_id in query.doc_ids:
relevant_chunks.extend(
genaix.query_document(
corpus_id=self.corpus_id,
document_id=doc_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
)
        # Make sure the chunks are sorted in descending order of relevance
        # score, even across multiple documents.
relevant_chunks.sort(key=lambda c: c.chunk_relevance_score, reverse=True)
return VectorStoreQueryResult(
nodes=[
TextNode(
text=chunk.chunk.data.string_value,
id_=_extract_chunk_id(chunk.chunk.name),
)
for chunk in relevant_chunks
],
ids=[_extract_chunk_id(chunk.chunk.name) for chunk in relevant_chunks],
similarities=[chunk.chunk_relevance_score for chunk in relevant_chunks],
)
def _extract_chunk_id(entity_name: str) -> str:
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
id = genaix.EntityName.from_str(entity_name).chunk_id
assert id is not None
return id
class _NodeGroup(BaseModel):
"""Every node in nodes have the same source node."""
source_node: RelatedNodeInfo
nodes: List[BaseNode]
def _group_nodes_by_source(nodes: Sequence[BaseNode]) -> List[_NodeGroup]:
"""Returns a list of lists of nodes where each list has all the nodes
from the same document.
"""
groups: Dict[str, _NodeGroup] = {}
for node in nodes:
source_node: RelatedNodeInfo
if isinstance(node.source_node, RelatedNodeInfo):
source_node = node.source_node
else:
source_node = RelatedNodeInfo(node_id=_default_doc_id)
if source_node.node_id not in groups:
groups[source_node.node_id] = _NodeGroup(source_node=source_node, nodes=[])
groups[source_node.node_id].nodes.append(node)
return list(groups.values())
def _convert_filter(fs: Optional[MetadataFilters]) -> Dict[str, Any]:
if fs is None:
return {}
assert isinstance(fs, MetadataFilters)
return {f.key: f.value for f in fs.filters}
| [
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.create_corpus",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_document",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_semantic_retriever",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.Config",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.delete_document",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.set_config",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.indices.service_context.ServiceContext.from_defaults",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_corpus",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.EntityName.from_str",
"llama_index.legacy.schema.RelatedNodeInfo"
] | [((888, 915), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (905, 915), False, 'import logging\n'), ((1081, 1137), 'llama_index.legacy.indices.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'None', 'embed_model': 'None'}), '(llm=None, embed_model=None)\n', (1109, 1137), False, 'from llama_index.legacy.indices.service_context import ServiceContext\n'), ((3165, 3187), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.Config', 'genaix.Config', ([], {}), '(**attrs)\n', (3178, 3187), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((3192, 3217), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.set_config', 'genaix.set_config', (['config'], {}), '(config)\n', (3209, 3217), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4232, 4250), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4237, 4250), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((4340, 4353), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (4351, 4353), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((5609, 5642), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (5640, 5642), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7309, 7342), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (7340, 7342), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7419, 7510), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.create_corpus', 'genaix.create_corpus', ([], {'corpus_id': 'new_corpus_id', 'display_name': 'display_name', 'client': 'client'}), '(corpus_id=new_corpus_id, display_name=display_name,\n client=client)\n', (7439, 7510), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7544, 7587), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['new_corpus.name'], {}), '(new_corpus.name)\n', (7570, 7587), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((9679, 9726), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (9683, 9726), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((11450, 11497), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (11454, 11497), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((11506, 11597), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.delete_document', 'genaix.delete_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'ref_doc_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=ref_doc_id,\n client=client)\n', (11528, 11597), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((13116, 13163), 'typing.cast', 
'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (13120, 13163), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((15004, 15043), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['entity_name'], {}), '(entity_name)\n', (15030, 15043), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((5654, 5707), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_corpus', 'genaix.get_corpus', ([], {'corpus_id': 'corpus_id', 'client': 'client'}), '(corpus_id=corpus_id, client=client)\n', (5671, 5707), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((9932, 10021), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_document', 'genaix.get_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'document_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=document_id,\n client=client)\n', (9951, 10021), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((15668, 15708), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '_default_doc_id'}), '(node_id=_default_doc_id)\n', (15683, 15708), False, 'from llama_index.legacy.schema import BaseNode, RelatedNodeInfo, TextNode\n'), ((7384, 7396), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7394, 7396), False, 'import uuid\n')] |
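The docstrings in the row above already spell out the intended call pattern; the sketch below simply assembles those pieces end to end. It assumes llama_index.legacy and google-generativeai are installed, that GoogleVectorStore and google_service_context are re-exported from llama_index.legacy.vector_stores.google.generativeai, and that a corpus with ID "my-corpus-id" already exists on Google's side.

from llama_index.legacy import VectorStoreIndex
from llama_index.legacy.vector_stores.google.generativeai import (  # assumed re-export path
    GoogleVectorStore,
    google_service_context,
)

# The corpus already lives on Google's side; embeddings are computed there,
# so no local LLM or embedding model is instantiated.
store = GoogleVectorStore.from_corpus(corpus_id="my-corpus-id")
index = VectorStoreIndex.from_vector_store(
    store,
    service_context=google_service_context,
)
retriever = index.as_retriever(similarity_top_k=3)
nodes = retriever.retrieve("What is the meaning of life?")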
"""Google Generative AI Vector Store.
The GenAI Semantic Retriever API is a managed end-to-end service that allows
developers to create a corpus of documents to perform semantic search on
related passages given a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast
from llama_index.legacy.bridge.pydantic import ( # type: ignore
BaseModel,
Field,
PrivateAttr,
)
from llama_index.legacy.indices.service_context import ServiceContext
from llama_index.legacy.schema import BaseNode, RelatedNodeInfo, TextNode
from llama_index.legacy.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryResult,
)
if TYPE_CHECKING:
from google.auth import credentials
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_default_doc_id = "default-doc"
google_service_context = ServiceContext.from_defaults(
# Avoids instantiating OpenAI as the default model.
llm=None,
# Avoids instantiating HuggingFace as the default model.
embed_model=None,
)
"""Google GenerativeAI service context.
Use this to provide the correct service context for `GoogleVectorStore`.
See the docstring for `GoogleVectorStore` for usage example.
"""
def set_google_config(
*,
api_endpoint: Optional[str] = None,
user_agent: Optional[str] = None,
page_size: Optional[int] = None,
auth_credentials: Optional["credentials.Credentials"] = None,
**kwargs: Any,
) -> None:
"""
Set the configuration for Google Generative AI API.
Parameters are optional, Normally, the defaults should work fine.
If provided, they will override the default values in the Config class.
See the docstring in `genai_extension.py` for more details.
auth_credentials: Optional["credentials.Credentials"] = None,
Use this to pass Google Auth credentials such as using a service account.
Refer to for auth credentials documentation:
https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount.
Example:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
"/path/to/service.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
config_attrs = {
"api_endpoint": api_endpoint,
"user_agent": user_agent,
"page_size": page_size,
"auth_credentials": auth_credentials,
"testing": kwargs.get("testing", None),
}
attrs = {k: v for k, v in config_attrs.items() if v is not None}
config = genaix.Config(**attrs)
genaix.set_config(config)
class NoSuchCorpusException(Exception):
def __init__(self, *, corpus_id: str) -> None:
super().__init__(f"No such corpus {corpus_id} found")
class GoogleVectorStore(BasePydanticVectorStore):
"""Google GenerativeAI Vector Store.
Currently, it computes the embedding vectors on the server side.
Example:
google_vector_store = GoogleVectorStore.from_corpus(
corpus_id="my-corpus-id")
index = VectorStoreIndex.from_vector_store(
google_vector_store,
service_context=google_service_context)
Attributes:
corpus_id: The corpus ID that this vector store instance will read and
write to.
"""
# Semantic Retriever stores the document node's text as string and embeds
# the vectors on the server automatically.
stores_text: bool = True
is_embedding_query: bool = False
# This is not the Google's corpus name but an ID generated in the LlamaIndex
# world.
corpus_id: str = Field(frozen=True)
"""Corpus ID that this instance of the vector store is using."""
_client: Any = PrivateAttr()
def __init__(self, *, client: Any, **kwargs: Any):
"""Raw constructor.
Use the class method `from_corpus` or `create_corpus` instead.
Args:
client: The low-level retriever class from google.ai.generativelanguage.
"""
try:
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(**kwargs)
assert isinstance(client, genai.RetrieverServiceClient)
self._client = client
@classmethod
def from_corpus(cls, *, corpus_id: str) -> "GoogleVectorStore":
"""Create an instance that points to an existing corpus.
Args:
corpus_id: ID of an existing corpus on Google's server.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
NoSuchCorpusException if no such corpus is found.
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.from_corpus(corpus_id={corpus_id})")
client = genaix.build_semantic_retriever()
if genaix.get_corpus(corpus_id=corpus_id, client=client) is None:
raise NoSuchCorpusException(corpus_id=corpus_id)
return cls(corpus_id=corpus_id, client=client)
@classmethod
def create_corpus(
cls, *, corpus_id: Optional[str] = None, display_name: Optional[str] = None
) -> "GoogleVectorStore":
"""Create an instance that points to a newly created corpus.
Examples:
store = GoogleVectorStore.create_corpus()
print(f"Created corpus with ID: {store.corpus_id})
store = GoogleVectorStore.create_corpus(
display_name="My first corpus"
)
store = GoogleVectorStore.create_corpus(
corpus_id="my-corpus-1",
display_name="My first corpus"
)
Args:
corpus_id: ID of the new corpus to be created. If not provided,
Google server will provide one for you.
display_name: Title of the corpus. If not provided, Google server
will provide one for you.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
An exception if the corpus already exists or the user hits the
quota limit.
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(
f"\n\nGoogleVectorStore.create_corpus(new_corpus_id={corpus_id}, new_display_name={display_name})"
)
client = genaix.build_semantic_retriever()
new_corpus_id = corpus_id or str(uuid.uuid4())
new_corpus = genaix.create_corpus(
corpus_id=new_corpus_id, display_name=display_name, client=client
)
name = genaix.EntityName.from_str(new_corpus.name)
return cls(corpus_id=name.corpus_id, client=client)
@classmethod
def class_name(cls) -> str:
return "GoogleVectorStore"
@property
def client(self) -> Any:
return self._client
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""Add nodes with embedding to vector store.
If a node has a source node, the source node's ID will be used to create
a document. Otherwise, a default document for that corpus will be used
to house the node.
Furthermore, if the source node has a metadata field "file_name", it
will be used as the title of the document. If the source node has no
such field, Google server will assign a title to the document.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.add([
TextNode(
text="Hello, my darling",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
TextNode(
text="Goodbye, my baby",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
])
The above code will create one document with ID `doc-456` and title
`Title for doc-456`. This document will house both nodes.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.add(nodes={nodes})")
client = cast(genai.RetrieverServiceClient, self.client)
created_node_ids: List[str] = []
for nodeGroup in _group_nodes_by_source(nodes):
source = nodeGroup.source_node
document_id = source.node_id
document = genaix.get_document(
corpus_id=self.corpus_id, document_id=document_id, client=client
)
if not document:
genaix.create_document(
corpus_id=self.corpus_id,
display_name=source.metadata.get("file_name", None),
document_id=document_id,
metadata=source.metadata,
client=client,
)
created_chunks = genaix.batch_create_chunk(
corpus_id=self.corpus_id,
document_id=document_id,
texts=[node.get_content() for node in nodeGroup.nodes],
metadatas=[node.metadata for node in nodeGroup.nodes],
client=client,
)
created_node_ids.extend([chunk.name for chunk in created_chunks])
return created_node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes by ref_doc_id.
Both the underlying nodes and the document will be deleted from Google
server.
Args:
ref_doc_id: The document ID to be deleted.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.delete(ref_doc_id={ref_doc_id})")
client = cast(genai.RetrieverServiceClient, self.client)
genaix.delete_document(
corpus_id=self.corpus_id, document_id=ref_doc_id, client=client
)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.query(
query=VectorStoreQuery(
query_str="What is the meaning of life?",
# Only nodes with this author.
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="author",
value="Arthur Schopenhauer",
)
]
),
# Only from these docs. If not provided,
# the entire corpus is searched.
doc_ids=["doc-456"],
similarity_top_k=3,
)
)
Args:
query: See `llama_index.vector_stores.types.VectorStoreQuery`.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.query(query={query})")
query_str = query.query_str
if query_str is None:
raise ValueError("VectorStoreQuery.query_str should not be None.")
client = cast(genai.RetrieverServiceClient, self.client)
relevant_chunks: List[genai.RelevantChunk] = []
if query.doc_ids is None:
# The chunks from query_corpus should be sorted in reverse order by
# relevant score.
relevant_chunks = genaix.query_corpus(
corpus_id=self.corpus_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
else:
for doc_id in query.doc_ids:
relevant_chunks.extend(
genaix.query_document(
corpus_id=self.corpus_id,
document_id=doc_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
)
# Make sure the chunks are reversed sorted according to relevant
# scores even across multiple documents.
relevant_chunks.sort(key=lambda c: c.chunk_relevance_score, reverse=True)
return VectorStoreQueryResult(
nodes=[
TextNode(
text=chunk.chunk.data.string_value,
id_=_extract_chunk_id(chunk.chunk.name),
)
for chunk in relevant_chunks
],
ids=[_extract_chunk_id(chunk.chunk.name) for chunk in relevant_chunks],
similarities=[chunk.chunk_relevance_score for chunk in relevant_chunks],
)
def _extract_chunk_id(entity_name: str) -> str:
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
id = genaix.EntityName.from_str(entity_name).chunk_id
assert id is not None
return id
class _NodeGroup(BaseModel):
"""Every node in nodes have the same source node."""
source_node: RelatedNodeInfo
nodes: List[BaseNode]
def _group_nodes_by_source(nodes: Sequence[BaseNode]) -> List[_NodeGroup]:
"""Returns a list of lists of nodes where each list has all the nodes
from the same document.
"""
groups: Dict[str, _NodeGroup] = {}
for node in nodes:
source_node: RelatedNodeInfo
if isinstance(node.source_node, RelatedNodeInfo):
source_node = node.source_node
else:
source_node = RelatedNodeInfo(node_id=_default_doc_id)
if source_node.node_id not in groups:
groups[source_node.node_id] = _NodeGroup(source_node=source_node, nodes=[])
groups[source_node.node_id].nodes.append(node)
return list(groups.values())
def _convert_filter(fs: Optional[MetadataFilters]) -> Dict[str, Any]:
if fs is None:
return {}
assert isinstance(fs, MetadataFilters)
return {f.key: f.value for f in fs.filters}
| [
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.create_corpus",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_document",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_semantic_retriever",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.Config",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.delete_document",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.set_config",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.indices.service_context.ServiceContext.from_defaults",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_corpus",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.EntityName.from_str",
"llama_index.legacy.schema.RelatedNodeInfo"
] | [((888, 915), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (905, 915), False, 'import logging\n'), ((1081, 1137), 'llama_index.legacy.indices.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'None', 'embed_model': 'None'}), '(llm=None, embed_model=None)\n', (1109, 1137), False, 'from llama_index.legacy.indices.service_context import ServiceContext\n'), ((3165, 3187), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.Config', 'genaix.Config', ([], {}), '(**attrs)\n', (3178, 3187), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((3192, 3217), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.set_config', 'genaix.set_config', (['config'], {}), '(config)\n', (3209, 3217), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4232, 4250), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4237, 4250), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((4340, 4353), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (4351, 4353), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((5609, 5642), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (5640, 5642), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7309, 7342), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (7340, 7342), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7419, 7510), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.create_corpus', 'genaix.create_corpus', ([], {'corpus_id': 'new_corpus_id', 'display_name': 'display_name', 'client': 'client'}), '(corpus_id=new_corpus_id, display_name=display_name,\n client=client)\n', (7439, 7510), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((7544, 7587), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['new_corpus.name'], {}), '(new_corpus.name)\n', (7570, 7587), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((9679, 9726), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (9683, 9726), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((11450, 11497), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (11454, 11497), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((11506, 11597), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.delete_document', 'genaix.delete_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'ref_doc_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=ref_doc_id,\n client=client)\n', (11528, 11597), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((13116, 13163), 'typing.cast', 
'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (13120, 13163), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((15004, 15043), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['entity_name'], {}), '(entity_name)\n', (15030, 15043), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((5654, 5707), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_corpus', 'genaix.get_corpus', ([], {'corpus_id': 'corpus_id', 'client': 'client'}), '(corpus_id=corpus_id, client=client)\n', (5671, 5707), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((9932, 10021), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_document', 'genaix.get_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'document_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=document_id,\n client=client)\n', (9951, 10021), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((15668, 15708), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '_default_doc_id'}), '(node_id=_default_doc_id)\n', (15683, 15708), False, 'from llama_index.legacy.schema import BaseNode, RelatedNodeInfo, TextNode\n'), ((7384, 7396), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7394, 7396), False, 'import uuid\n')] |
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pkg_resources
import requests
from pkg_resources import DistributionNotFound
from llama_index.legacy.download.utils import (
get_exports,
get_file_content,
initialize_directory,
rewrite_exports,
)
LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main"
LLAMA_HUB_PATH = "/llama_hub"
LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH
PATH_TYPE = Union[str, Path]
logger = logging.getLogger(__name__)
LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads"
class MODULE_TYPE(str, Enum):
LOADER = "loader"
TOOL = "tool"
LLAMAPACK = "llamapack"
DATASETS = "datasets"
def get_module_info(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_class: str,
refresh_cache: bool = False,
library_path: str = "library.json",
disable_library_cache: bool = False,
) -> Dict:
"""Get module info."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
local_library_path = f"{local_dir_path}/{library_path}"
module_id = None # e.g. `web/simple_web`
extra_files = [] # e.g. `web/simple_web/utils.py`
# Check cache first
if not refresh_cache and os.path.exists(local_library_path):
with open(local_library_path) as f:
library = json.load(f)
if module_class in library:
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# Fetch up-to-date library from remote repo if module_id not found
if module_id is None:
library_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{library_path}"
)
library = json.loads(library_raw_content)
if module_class not in library:
raise ValueError("Loader class name not found in library")
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# create cache dir if needed
local_library_dir = os.path.dirname(local_library_path)
if not disable_library_cache:
if not os.path.exists(local_library_dir):
os.makedirs(local_library_dir)
# Update cache
with open(local_library_path, "w") as f:
f.write(library_raw_content)
if module_id is None:
raise ValueError("Loader class name not found in library")
return {
"module_id": module_id,
"extra_files": extra_files,
}
def download_module_and_reqs(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_id: str,
extra_files: List[str],
refresh_cache: bool = False,
use_gpt_index_import: bool = False,
base_file_name: str = "base.py",
override_path: bool = False,
) -> None:
"""Load module."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
if override_path:
module_path = str(local_dir_path)
else:
module_path = f"{local_dir_path}/{module_id}"
if refresh_cache or not os.path.exists(module_path):
os.makedirs(module_path, exist_ok=True)
basepy_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{base_file_name}"
)
if use_gpt_index_import:
            # Rewrite plain ``llama_index`` imports in the downloaded module to the
            # ``llama_index.legacy`` namespace (assumed intent of this flag).
            basepy_raw_content = basepy_raw_content.replace(
                "import llama_index", "import llama_index.legacy"
            )
            basepy_raw_content = basepy_raw_content.replace(
                "from llama_index", "from llama_index.legacy"
            )
with open(f"{module_path}/{base_file_name}", "w") as f:
f.write(basepy_raw_content)
# Get content of extra files if there are any
# and write them under the loader directory
for extra_file in extra_files:
extra_file_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{extra_file}"
)
# If the extra file is an __init__.py file, we need to
# add the exports to the __init__.py file in the modules directory
if extra_file == "__init__.py":
loader_exports = get_exports(extra_file_raw_content)
existing_exports = []
init_file_path = local_dir_path / "__init__.py"
            # if the __init__.py file does not exist, we need to create it
mode = "a+" if not os.path.exists(init_file_path) else "r+"
with open(init_file_path, mode) as f:
f.write(f"from .{module_id} import {', '.join(loader_exports)}")
existing_exports = get_exports(f.read())
rewrite_exports(existing_exports + loader_exports, str(local_dir_path))
with open(f"{module_path}/{extra_file}", "w") as f:
f.write(extra_file_raw_content)
# install requirements
requirements_path = f"{local_dir_path}/requirements.txt"
if not os.path.exists(requirements_path):
# NOTE: need to check the status code
response_txt, status_code = get_file_content(
str(remote_dir_path), f"/{module_id}/requirements.txt"
)
if status_code == 200:
with open(requirements_path, "w") as f:
f.write(response_txt)
# Install dependencies if there are any and not already installed
if os.path.exists(requirements_path):
try:
requirements = pkg_resources.parse_requirements(
Path(requirements_path).open()
)
pkg_resources.require([str(r) for r in requirements])
except DistributionNotFound:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", requirements_path]
)
def download_llama_module(
module_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_dir: Optional[str] = None,
custom_path: Optional[str] = None,
library_path: str = "library.json",
base_file_name: str = "base.py",
use_gpt_index_import: bool = False,
disable_library_cache: bool = False,
override_path: bool = False,
skip_load: bool = False,
) -> Any:
"""Download a module from LlamaHub.
Can be a loader, tool, pack, or more.
Args:
        module_class: The name of the llama module class you want to download,
such as `GmailOpenAIAgentPack`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_dir: Custom dir name to download loader into (under parent folder).
custom_path: Custom dirpath to download loader into.
library_path: File name of the library file.
        use_gpt_index_import: If true, the loader files will be rewritten to
            use llama_index.legacy as the base dependency. By default (False),
            the loader files use llama_index as the base dependency.
NOTE: this is a temporary workaround while we fully migrate all usages
to llama_index.
        skip_load: If true, download the module (and its requirements) without importing it.
Returns:
A Loader, A Pack, An Agent, or A Dataset
"""
# create directory / get path
dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir)
# fetch info from library.json file
module_info = get_module_info(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_class=module_class,
refresh_cache=refresh_cache,
library_path=library_path,
disable_library_cache=disable_library_cache,
)
module_id = module_info["module_id"]
extra_files = module_info["extra_files"]
# download the module, install requirements
download_module_and_reqs(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_id=module_id,
extra_files=extra_files,
refresh_cache=refresh_cache,
use_gpt_index_import=use_gpt_index_import,
base_file_name=base_file_name,
override_path=override_path,
)
if skip_load:
return None
# loads the module into memory
if override_path:
path = f"{dirpath}/{base_file_name}"
spec = util.spec_from_file_location("custom_module", location=path)
if spec is None:
raise ValueError(f"Could not find file: {path}.")
else:
path = f"{dirpath}/{module_id}/{base_file_name}"
spec = util.spec_from_file_location("custom_module", location=path)
if spec is None:
raise ValueError(f"Could not find file: {path}.")
module = util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return getattr(module, module_class)
def track_download(module_class: str, module_type: str) -> None:
"""Tracks number of downloads via Llamahub proxy.
Args:
module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`.
module_type: Can be "loader", "tool", "llamapack", or "datasets"
"""
try:
requests.post(
LLAMAHUB_ANALYTICS_PROXY_SERVER,
json={"type": module_type, "plugin": module_class},
)
except Exception as e:
logger.info(f"Error tracking downloads for {module_class} : {e}")
| [
"llama_index.legacy.download.utils.initialize_directory",
"llama_index.legacy.download.utils.get_exports"
] | [((645, 672), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (662, 672), False, 'import logging\n'), ((5586, 5619), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5600, 5619), False, 'import os\n'), ((7468, 7536), 'llama_index.legacy.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7488, 7536), False, 'from llama_index.legacy.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8866, 8893), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8887, 8893), False, 'from importlib import util\n'), ((1205, 1225), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1209, 1225), False, 'from pathlib import Path\n'), ((1442, 1476), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1456, 1476), False, 'import os\n'), ((1946, 1977), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1956, 1977), False, 'import json\n'), ((2271, 2306), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2286, 2306), False, 'import os\n'), ((3139, 3159), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3143, 3159), False, 'from pathlib import Path\n'), ((3355, 3394), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3366, 3394), False, 'import os\n'), ((5175, 5208), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5189, 5208), False, 'import os\n'), ((8474, 8534), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'path'}), "('custom_module', location=path)\n", (8502, 8534), False, 'from importlib import util\n'), ((8704, 8764), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'path'}), "('custom_module', location=path)\n", (8732, 8764), False, 'from importlib import util\n'), ((9316, 9418), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9329, 9418), False, 'import requests\n'), ((1544, 1556), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1553, 1556), False, 'import json\n'), ((3318, 3345), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3332, 3345), False, 'import os\n'), ((4421, 4456), 'llama_index.legacy.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4432, 4456), False, 'from llama_index.legacy.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2364, 2397), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2378, 2397), False, 'import os\n'), ((2415, 2445), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2426, 2445), False, 'import os\n'), ((5871, 5963), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n 
requirements_path])\n", (5892, 5963), False, 'import subprocess\n'), ((4656, 4686), 'os.path.exists', 'os.path.exists', (['init_file_path'], {}), '(init_file_path)\n', (4670, 4686), False, 'import os\n'), ((5711, 5734), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5715, 5734), False, 'from pathlib import Path\n')] |
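To close out the row above, here is a minimal sketch of how these helpers are meant to be driven. The import path llama_index.legacy.download.module is inferred from the package imports shown in the row rather than stated in it, and the example assumes "SimpleWebPageReader" is still listed in llama-hub's library.json (the row itself uses its module id, web/simple_web, as an example).

from llama_index.legacy.download.module import (  # inferred module path
    MODULE_TYPE,
    download_llama_module,
    track_download,
)

# Resolve "SimpleWebPageReader" through library.json, download base.py plus any
# extra files, install its requirements, then import and return the class.
reader_cls = download_llama_module("SimpleWebPageReader", refresh_cache=True)
track_download("SimpleWebPageReader", MODULE_TYPE.LOADER)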
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pkg_resources
import requests
from pkg_resources import DistributionNotFound
from llama_index.legacy.download.utils import (
get_exports,
get_file_content,
initialize_directory,
rewrite_exports,
)
LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main"
LLAMA_HUB_PATH = "/llama_hub"
LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH
PATH_TYPE = Union[str, Path]
logger = logging.getLogger(__name__)
LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads"
class MODULE_TYPE(str, Enum):
LOADER = "loader"
TOOL = "tool"
LLAMAPACK = "llamapack"
DATASETS = "datasets"
def get_module_info(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_class: str,
refresh_cache: bool = False,
library_path: str = "library.json",
disable_library_cache: bool = False,
) -> Dict:
"""Get module info."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
local_library_path = f"{local_dir_path}/{library_path}"
module_id = None # e.g. `web/simple_web`
extra_files = [] # e.g. `web/simple_web/utils.py`
# Check cache first
if not refresh_cache and os.path.exists(local_library_path):
with open(local_library_path) as f:
library = json.load(f)
if module_class in library:
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# Fetch up-to-date library from remote repo if module_id not found
if module_id is None:
library_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{library_path}"
)
library = json.loads(library_raw_content)
if module_class not in library:
raise ValueError("Loader class name not found in library")
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# create cache dir if needed
local_library_dir = os.path.dirname(local_library_path)
if not disable_library_cache:
if not os.path.exists(local_library_dir):
os.makedirs(local_library_dir)
# Update cache
with open(local_library_path, "w") as f:
f.write(library_raw_content)
if module_id is None:
raise ValueError("Loader class name not found in library")
return {
"module_id": module_id,
"extra_files": extra_files,
}
def download_module_and_reqs(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_id: str,
extra_files: List[str],
refresh_cache: bool = False,
use_gpt_index_import: bool = False,
base_file_name: str = "base.py",
override_path: bool = False,
) -> None:
"""Load module."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
if override_path:
module_path = str(local_dir_path)
else:
module_path = f"{local_dir_path}/{module_id}"
if refresh_cache or not os.path.exists(module_path):
os.makedirs(module_path, exist_ok=True)
basepy_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{base_file_name}"
)
if use_gpt_index_import:
basepy_raw_content = basepy_raw_content.replace(
"import llama_index.legacy", "import llama_index.legacy"
)
basepy_raw_content = basepy_raw_content.replace(
"from llama_index.legacy", "from llama_index.legacy"
)
with open(f"{module_path}/{base_file_name}", "w") as f:
f.write(basepy_raw_content)
# Get content of extra files if there are any
# and write them under the loader directory
for extra_file in extra_files:
extra_file_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{extra_file}"
)
# If the extra file is an __init__.py file, we need to
# add the exports to the __init__.py file in the modules directory
if extra_file == "__init__.py":
loader_exports = get_exports(extra_file_raw_content)
existing_exports = []
init_file_path = local_dir_path / "__init__.py"
# if the __init__.py file do not exists, we need to create it
mode = "a+" if not os.path.exists(init_file_path) else "r+"
with open(init_file_path, mode) as f:
f.write(f"from .{module_id} import {', '.join(loader_exports)}")
existing_exports = get_exports(f.read())
rewrite_exports(existing_exports + loader_exports, str(local_dir_path))
with open(f"{module_path}/{extra_file}", "w") as f:
f.write(extra_file_raw_content)
# install requirements
requirements_path = f"{local_dir_path}/requirements.txt"
if not os.path.exists(requirements_path):
# NOTE: need to check the status code
response_txt, status_code = get_file_content(
str(remote_dir_path), f"/{module_id}/requirements.txt"
)
if status_code == 200:
with open(requirements_path, "w") as f:
f.write(response_txt)
# Install dependencies if there are any and not already installed
if os.path.exists(requirements_path):
try:
requirements = pkg_resources.parse_requirements(
Path(requirements_path).open()
)
pkg_resources.require([str(r) for r in requirements])
except DistributionNotFound:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", requirements_path]
)
def download_llama_module(
module_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_dir: Optional[str] = None,
custom_path: Optional[str] = None,
library_path: str = "library.json",
base_file_name: str = "base.py",
use_gpt_index_import: bool = False,
disable_library_cache: bool = False,
override_path: bool = False,
skip_load: bool = False,
) -> Any:
"""Download a module from LlamaHub.
Can be a loader, tool, pack, or more.
Args:
loader_class: The name of the llama module class you want to download,
such as `GmailOpenAIAgentPack`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_dir: Custom dir name to download loader into (under parent folder).
custom_path: Custom dirpath to download loader into.
library_path: File name of the library file.
use_gpt_index_import: If true, the loader files will use
llama_index as the base dependency. By default (False),
the loader files use llama_index as the base dependency.
NOTE: this is a temporary workaround while we fully migrate all usages
to llama_index.
is_dataset: whether or not downloading a LlamaDataset
Returns:
A Loader, A Pack, An Agent, or A Dataset
"""
# create directory / get path
dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir)
# fetch info from library.json file
module_info = get_module_info(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_class=module_class,
refresh_cache=refresh_cache,
library_path=library_path,
disable_library_cache=disable_library_cache,
)
module_id = module_info["module_id"]
extra_files = module_info["extra_files"]
# download the module, install requirements
download_module_and_reqs(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_id=module_id,
extra_files=extra_files,
refresh_cache=refresh_cache,
use_gpt_index_import=use_gpt_index_import,
base_file_name=base_file_name,
override_path=override_path,
)
if skip_load:
return None
# loads the module into memory
if override_path:
path = f"{dirpath}/{base_file_name}"
spec = util.spec_from_file_location("custom_module", location=path)
if spec is None:
raise ValueError(f"Could not find file: {path}.")
else:
path = f"{dirpath}/{module_id}/{base_file_name}"
spec = util.spec_from_file_location("custom_module", location=path)
if spec is None:
raise ValueError(f"Could not find file: {path}.")
module = util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return getattr(module, module_class)
def track_download(module_class: str, module_type: str) -> None:
"""Tracks number of downloads via Llamahub proxy.
Args:
module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`.
module_type: Can be "loader", "tool", "llamapack", or "datasets"
"""
try:
requests.post(
LLAMAHUB_ANALYTICS_PROXY_SERVER,
json={"type": module_type, "plugin": module_class},
)
except Exception as e:
logger.info(f"Error tracking downloads for {module_class} : {e}")
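# A minimal usage sketch of the two helpers above, assuming network access to
# LlamaHub; "GmailOpenAIAgentPack" is the example class named in the docstring
# and may or may not be available on the remote library.
def _example_download_and_track() -> None:
    pack_cls = download_llama_module(
        "GmailOpenAIAgentPack",
        refresh_cache=True,
    )
    track_download("GmailOpenAIAgentPack", "llamapack")
    print(f"Downloaded class: {pack_cls.__name__}")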
| [
"llama_index.legacy.download.utils.initialize_directory",
"llama_index.legacy.download.utils.get_exports"
] | [((645, 672), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (662, 672), False, 'import logging\n'), ((5586, 5619), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5600, 5619), False, 'import os\n'), ((7468, 7536), 'llama_index.legacy.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7488, 7536), False, 'from llama_index.legacy.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8866, 8893), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8887, 8893), False, 'from importlib import util\n'), ((1205, 1225), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1209, 1225), False, 'from pathlib import Path\n'), ((1442, 1476), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1456, 1476), False, 'import os\n'), ((1946, 1977), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1956, 1977), False, 'import json\n'), ((2271, 2306), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2286, 2306), False, 'import os\n'), ((3139, 3159), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3143, 3159), False, 'from pathlib import Path\n'), ((3355, 3394), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3366, 3394), False, 'import os\n'), ((5175, 5208), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5189, 5208), False, 'import os\n'), ((8474, 8534), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'path'}), "('custom_module', location=path)\n", (8502, 8534), False, 'from importlib import util\n'), ((8704, 8764), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'path'}), "('custom_module', location=path)\n", (8732, 8764), False, 'from importlib import util\n'), ((9316, 9418), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9329, 9418), False, 'import requests\n'), ((1544, 1556), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1553, 1556), False, 'import json\n'), ((3318, 3345), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3332, 3345), False, 'import os\n'), ((4421, 4456), 'llama_index.legacy.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4432, 4456), False, 'from llama_index.legacy.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2364, 2397), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2378, 2397), False, 'import os\n'), ((2415, 2445), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2426, 2445), False, 'import os\n'), ((5871, 5963), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n 
requirements_path])\n", (5892, 5963), False, 'import subprocess\n'), ((4656, 4686), 'os.path.exists', 'os.path.exists', (['init_file_path'], {}), '(init_file_path)\n', (4670, 4686), False, 'import os\n'), ((5711, 5734), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5715, 5734), False, 'from pathlib import Path\n')] |
import logging
from dataclasses import dataclass
from typing import Any, List, Optional, cast
import llama_index.legacy
from llama_index.legacy.bridge.pydantic import BaseModel
from llama_index.legacy.callbacks.base import CallbackManager
from llama_index.legacy.core.embeddings.base import BaseEmbedding
from llama_index.legacy.indices.prompt_helper import PromptHelper
from llama_index.legacy.llm_predictor import LLMPredictor
from llama_index.legacy.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.llms.utils import LLMType, resolve_llm
from llama_index.legacy.logger import LlamaLogger
from llama_index.legacy.node_parser.interface import NodeParser, TextSplitter
from llama_index.legacy.node_parser.text.sentence import (
DEFAULT_CHUNK_SIZE,
SENTENCE_CHUNK_OVERLAP,
SentenceSplitter,
)
from llama_index.legacy.prompts.base import BasePromptTemplate
from llama_index.legacy.schema import TransformComponent
from llama_index.legacy.types import PydanticProgramMode
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: int = DEFAULT_CHUNK_SIZE,
chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SentenceSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager or CallbackManager(),
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
class ServiceContextData(BaseModel):
llm: dict
llm_predictor: dict
prompt_helper: dict
embed_model: dict
transformations: List[dict]
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
transformations: List[TransformComponent]
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Any] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# pydantic program mode (used if output_cls is specified)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.legacy.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
system_prompt (Optional[str]): System-wide prompt to be prepended
to all input prompts, used to guide system "decision making"
query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
passed-in input queries.
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
from llama_index.legacy.embeddings.utils import EmbedType, resolve_embed_model
embed_model = cast(EmbedType, embed_model)
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.legacy.global_service_context is not None:
return cls.from_service_context(
llama_index.legacy.global_service_context,
llm=llm,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
text_splitter=text_splitter,
llama_logger=llama_logger,
callback_manager=callback_manager,
context_window=context_window,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
chunk_overlap=chunk_overlap,
num_output=num_output,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
transformations=transformations,
)
callback_manager = callback_manager or CallbackManager([])
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm.system_prompt = llm.system_prompt or system_prompt
llm.query_wrapper_prompt = llm.query_wrapper_prompt or query_wrapper_prompt
llm.pydantic_program_mode = (
llm.pydantic_program_mode or pydantic_program_mode
)
if llm_predictor is not None:
print("LLMPredictor is deprecated, please use LLM instead.")
llm_predictor = llm_predictor or LLMPredictor(
llm=llm, pydantic_program_mode=pydantic_program_mode
)
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# NOTE: embed model should be a transformation, but the way the service
# context works, we can't put in there yet.
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or [node_parser]
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Any] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
    ) -> "ServiceContext":
        """Instantiate a new service context, using a previous ServiceContext as the defaults."""
from llama_index.legacy.embeddings.utils import EmbedType, resolve_embed_model
embed_model = cast(EmbedType, embed_model)
if chunk_size_limit is not None and chunk_size is None:
            logger.warning(
                "chunk_size_limit is deprecated, please specify chunk_size instead"
            )
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# default to using the embed model passed from the service context
if embed_model == "default":
embed_model = service_context.embed_model
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or service_context.prompt_helper
if context_window is not None or num_output is not None:
prompt_helper = _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
transformations = transformations or []
node_parser_found = False
for transform in service_context.transformations:
if isinstance(transform, NodeParser):
node_parser_found = True
node_parser = transform
break
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
if not node_parser_found:
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or service_context.transformations
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
return self.llm_predictor.llm
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
for transform in self.transformations:
if isinstance(transform, NodeParser):
return transform
raise ValueError("No node parser found.")
def to_dict(self) -> dict:
"""Convert service context to dict."""
llm_dict = self.llm_predictor.llm.to_dict()
llm_predictor_dict = self.llm_predictor.to_dict()
embed_model_dict = self.embed_model.to_dict()
prompt_helper_dict = self.prompt_helper.to_dict()
        transform_list_dict = [x.to_dict() for x in self.transformations]
return ServiceContextData(
llm=llm_dict,
llm_predictor=llm_predictor_dict,
prompt_helper=prompt_helper_dict,
embed_model=embed_model_dict,
            transformations=transform_list_dict,
).dict()
@classmethod
def from_dict(cls, data: dict) -> "ServiceContext":
from llama_index.legacy.embeddings.loading import load_embed_model
from llama_index.legacy.extractors.loading import load_extractor
from llama_index.legacy.llm_predictor.loading import load_predictor
from llama_index.legacy.node_parser.loading import load_parser
service_context_data = ServiceContextData.parse_obj(data)
llm_predictor = load_predictor(service_context_data.llm_predictor)
embed_model = load_embed_model(service_context_data.embed_model)
prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)
transformations: List[TransformComponent] = []
for transform in service_context_data.transformations:
try:
transformations.append(load_parser(transform))
except ValueError:
transformations.append(load_extractor(transform))
return cls.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
transformations=transformations,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.legacy.global_service_context = service_context
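# A minimal usage sketch of the container above, assuming the legacy
# llama-index package is installed and the default LLM/embedding providers are
# configured (e.g. an OPENAI_API_KEY in the environment); only APIs defined in
# this module are used.
def _example_service_context_roundtrip() -> "ServiceContext":
    service_context = ServiceContext.from_defaults(chunk_size=512, chunk_overlap=20)
    set_global_service_context(service_context)
    # The container serializes to a plain dict and can be rebuilt from it.
    data = service_context.to_dict()
    return ServiceContext.from_dict(data)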
| [
"llama_index.legacy.node_parser.loading.load_parser",
"llama_index.legacy.callbacks.base.CallbackManager",
"llama_index.legacy.llm_predictor.LLMPredictor",
"llama_index.legacy.embeddings.loading.load_embed_model",
"llama_index.legacy.logger.LlamaLogger",
"llama_index.legacy.llms.utils.resolve_llm",
"llama_index.legacy.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.legacy.indices.prompt_helper.PromptHelper.from_dict",
"llama_index.legacy.embeddings.utils.resolve_embed_model",
"llama_index.legacy.extractors.loading.load_extractor",
"llama_index.legacy.llm_predictor.loading.load_predictor"
] | [((1067, 1094), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1084, 1094), False, 'import logging\n'), ((1869, 1926), 'llama_index.legacy.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1899, 1926), False, 'from llama_index.legacy.indices.prompt_helper import PromptHelper\n'), ((5247, 5275), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (5251, 5275), False, 'from typing import Any, List, Optional, cast\n'), ((7708, 7740), 'llama_index.legacy.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7727, 7740), False, 'from llama_index.legacy.embeddings.utils import EmbedType, resolve_embed_model\n'), ((10159, 10187), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (10163, 10187), False, 'from typing import Any, List, Optional, cast\n'), ((11403, 11435), 'llama_index.legacy.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (11422, 11435), False, 'from llama_index.legacy.embeddings.utils import EmbedType, resolve_embed_model\n'), ((14605, 14655), 'llama_index.legacy.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14619, 14655), False, 'from llama_index.legacy.llm_predictor.loading import load_predictor\n'), ((14679, 14729), 'llama_index.legacy.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14695, 14729), False, 'from llama_index.legacy.embeddings.loading import load_embed_model\n'), ((14755, 14813), 'llama_index.legacy.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (14777, 14813), False, 'from llama_index.legacy.indices.prompt_helper import PromptHelper\n'), ((6452, 6471), 'llama_index.legacy.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6467, 6471), False, 'from llama_index.legacy.callbacks.base import CallbackManager\n'), ((6639, 6655), 'llama_index.legacy.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6650, 6655), False, 'from llama_index.legacy.llms.utils import LLMType, resolve_llm\n'), ((7087, 7153), 'llama_index.legacy.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (7099, 7153), False, 'from llama_index.legacy.llm_predictor import LLMPredictor\n'), ((8616, 8629), 'llama_index.legacy.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8627, 8629), False, 'from llama_index.legacy.logger import LlamaLogger\n'), ((10698, 10714), 'llama_index.legacy.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (10709, 10714), False, 'from llama_index.legacy.llms.utils import LLMType, resolve_llm\n'), ((10743, 10764), 'llama_index.legacy.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10755, 10764), False, 'from llama_index.legacy.llm_predictor import LLMPredictor\n'), ((1468, 1485), 'llama_index.legacy.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1483, 1485), False, 'from llama_index.legacy.callbacks.base import 
CallbackManager\n'), ((14989, 15011), 'llama_index.legacy.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (15000, 15011), False, 'from llama_index.legacy.node_parser.loading import load_parser\n'), ((15083, 15108), 'llama_index.legacy.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (15097, 15108), False, 'from llama_index.legacy.extractors.loading import load_extractor\n')] |
"""Astra DB."""
from typing import Any, List, Optional
import llama_index.core
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class AstraDBReader(BaseReader):
"""Astra DB reader.
Retrieve documents from an Astra DB Instance.
Args:
        collection_name (str): collection name to use. If it does not exist, it will be created.
token (str): The Astra DB Application Token to use.
api_endpoint (str): The Astra DB JSON API endpoint for your database.
embedding_dimension (int): Length of the embedding vectors in use.
        namespace (Optional[str]): The namespace to use. Defaults to 'default_keyspace' if not provided.
client (Optional[Any]): Astra DB client to use. If not provided, one will be created.
"""
def __init__(
self,
*,
collection_name: str,
token: str,
api_endpoint: str,
embedding_dimension: int,
namespace: Optional[str] = None,
client: Optional[Any] = None,
) -> None:
"""Initialize with parameters."""
import_err_msg = (
"`astrapy` package not found, please run `pip install --upgrade astrapy`"
)
# Try to import astrapy for use
try:
from astrapy.db import AstraDB
except ImportError:
raise ImportError(import_err_msg)
if client is not None:
self._client = client.copy()
self._client.set_caller(
caller_name=getattr(llama_index, "__name__", "llama_index"),
caller_version=getattr(llama_index.core, "__version__", None),
)
else:
# Build the Astra DB object
self._client = AstraDB(
api_endpoint=api_endpoint,
token=token,
namespace=namespace,
caller_name=getattr(llama_index, "__name__", "llama_index"),
caller_version=getattr(llama_index.core, "__version__", None),
)
self._collection = self._client.create_collection(
collection_name=collection_name, dimension=embedding_dimension
)
def load_data(self, vector: List[float], limit: int = 10, **kwargs: Any) -> Any:
"""Load data from Astra DB.
Args:
            vector (List[float]): The query vector.
limit (int): Number of results to return.
kwargs (Any): Additional arguments to pass to the Astra DB query.
Returns:
List[Document]: A list of documents.
"""
results = self._collection.vector_find(vector, limit=limit, **kwargs)
documents: List[Document] = []
for result in results:
document = Document(
doc_id=result["_id"],
text=result["content"],
embedding=result["$vector"],
)
documents.append(document)
return documents
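# A minimal usage sketch of the reader above. The endpoint, token, collection
# name and query vector are placeholders: a real call needs valid Astra DB
# credentials and a query embedding whose length matches embedding_dimension.
def _example_astra_reader() -> None:
    reader = AstraDBReader(
        collection_name="llama_docs",  # placeholder collection name
        token="AstraCS:<application-token>",  # placeholder token
        api_endpoint="https://<database-id>-<region>.apps.astra.datastax.com",  # placeholder endpoint
        embedding_dimension=1536,
    )
    documents = reader.load_data(vector=[0.0] * 1536, limit=5)
    print(f"Retrieved {len(documents)} documents")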
| [
"llama_index.core.schema.Document"
] | [((2732, 2820), 'llama_index.core.schema.Document', 'Document', ([], {'doc_id': "result['_id']", 'text': "result['content']", 'embedding': "result['$vector']"}), "(doc_id=result['_id'], text=result['content'], embedding=result[\n '$vector'])\n", (2740, 2820), False, 'from llama_index.core.schema import Document\n')] |
from unittest.mock import MagicMock, patch
import pytest
from llama_index.legacy.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.legacy.vector_stores.types import (
ExactMatchFilter,
MetadataFilters,
VectorStoreQuery,
)
try:
import google.ai.generativelanguage as genai
has_google = True
except ImportError:
has_google = False
from llama_index.legacy.vector_stores.google.generativeai import (
GoogleVectorStore,
set_google_config,
)
SKIP_TEST_REASON = "Google GenerativeAI is not installed"
if has_google:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
# Make sure the tests do not hit actual production servers.
set_google_config(
api_endpoint="No-such-endpoint-to-prevent-hitting-real-backend",
testing=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.auth.credentials.Credentials")
def test_set_google_config(mock_credentials: MagicMock) -> None:
set_google_config(auth_credentials=mock_credentials)
config = genaix.get_config()
assert config.auth_credentials == mock_credentials
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_corpus")
def test_create_corpus(mock_create_corpus: MagicMock) -> None:
def fake_create_corpus(request: genai.CreateCorpusRequest) -> genai.Corpus:
return request.corpus
# Arrange
mock_create_corpus.side_effect = fake_create_corpus
# Act
store = GoogleVectorStore.create_corpus(display_name="My first corpus")
# Assert
assert len(store.corpus_id) > 0
assert mock_create_corpus.call_count == 1
request = mock_create_corpus.call_args.args[0]
assert request.corpus.name == f"corpora/{store.corpus_id}"
assert request.corpus.display_name == "My first corpus"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_from_corpus(mock_get_corpus: MagicMock) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
# Act
store = GoogleVectorStore.from_corpus(corpus_id="123")
# Assert
assert store.corpus_id == "123"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
def test_class_name() -> None:
# Act
class_name = GoogleVectorStore.class_name()
# Assert
assert class_name == "GoogleVectorStore"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks")
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_document")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_document")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_add(
mock_get_corpus: MagicMock,
mock_get_document: MagicMock,
mock_create_document: MagicMock,
mock_batch_create_chunks: MagicMock,
) -> None:
from google.api_core import exceptions as gapi_exception
# Arrange
    # We set the maximum number of requests per batch to 2.
# Then, we send 3 requests.
# We expect to have 2 batches where the last batch has only 1 request.
genaix._MAX_REQUEST_PER_CHUNK = 2
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
mock_get_document.side_effect = gapi_exception.NotFound("")
mock_create_document.return_value = genai.Document(name="corpora/123/documents/456")
mock_batch_create_chunks.side_effect = [
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/777"),
genai.Chunk(name="corpora/123/documents/456/chunks/888"),
]
),
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/999"),
]
),
]
# Act
store = GoogleVectorStore.from_corpus(corpus_id="123")
response = store.add(
[
TextNode(
text="Hello my baby",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="456",
metadata={"file_name": "Title for doc 456"},
)
},
metadata={"position": 100},
),
TextNode(
text="Hello my honey",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="456",
metadata={"file_name": "Title for doc 456"},
)
},
metadata={"position": 200},
),
TextNode(
text="Hello my ragtime gal",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="456",
metadata={"file_name": "Title for doc 456"},
)
},
metadata={"position": 300},
),
]
)
# Assert
assert response == [
"corpora/123/documents/456/chunks/777",
"corpora/123/documents/456/chunks/888",
"corpora/123/documents/456/chunks/999",
]
create_document_request = mock_create_document.call_args.args[0]
assert create_document_request == genai.CreateDocumentRequest(
parent="corpora/123",
document=genai.Document(
name="corpora/123/documents/456",
display_name="Title for doc 456",
custom_metadata=[
genai.CustomMetadata(
key="file_name",
string_value="Title for doc 456",
),
],
),
)
assert mock_batch_create_chunks.call_count == 2
mock_batch_create_chunks_calls = mock_batch_create_chunks.call_args_list
first_batch_create_chunks_request = mock_batch_create_chunks_calls[0].args[0]
assert first_batch_create_chunks_request == genai.BatchCreateChunksRequest(
parent="corpora/123/documents/456",
requests=[
genai.CreateChunkRequest(
parent="corpora/123/documents/456",
chunk=genai.Chunk(
data=genai.ChunkData(string_value="Hello my baby"),
custom_metadata=[
genai.CustomMetadata(
key="position",
numeric_value=100,
),
],
),
),
genai.CreateChunkRequest(
parent="corpora/123/documents/456",
chunk=genai.Chunk(
data=genai.ChunkData(string_value="Hello my honey"),
custom_metadata=[
genai.CustomMetadata(
key="position",
numeric_value=200,
),
],
),
),
],
)
second_batch_create_chunks_request = mock_batch_create_chunks_calls[1].args[0]
assert second_batch_create_chunks_request == genai.BatchCreateChunksRequest(
parent="corpora/123/documents/456",
requests=[
genai.CreateChunkRequest(
parent="corpora/123/documents/456",
chunk=genai.Chunk(
data=genai.ChunkData(string_value="Hello my ragtime gal"),
custom_metadata=[
genai.CustomMetadata(
key="position",
numeric_value=300,
),
],
),
),
],
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.delete_document")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_delete(
mock_get_corpus: MagicMock,
mock_delete_document: MagicMock,
) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
# Act
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.delete(ref_doc_id="doc-456")
# Assert
delete_document_request = mock_delete_document.call_args.args[0]
assert delete_document_request == genai.DeleteDocumentRequest(
name="corpora/123/documents/doc-456",
force=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.query_corpus")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_query(
mock_get_corpus: MagicMock,
mock_query_corpus: MagicMock,
) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
mock_query_corpus.return_value = genai.QueryCorpusResponse(
relevant_chunks=[
genai.RelevantChunk(
chunk=genai.Chunk(
name="corpora/123/documents/456/chunks/789",
data=genai.ChunkData(string_value="42"),
),
chunk_relevance_score=0.9,
)
]
)
# Act
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.query(
query=VectorStoreQuery(
query_str="What is the meaning of life?",
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="author",
value="Arthur Schopenhauer",
)
]
),
similarity_top_k=1,
)
)
# Assert
assert mock_query_corpus.call_count == 1
query_corpus_request = mock_query_corpus.call_args.args[0]
assert query_corpus_request == genai.QueryCorpusRequest(
name="corpora/123",
query="What is the meaning of life?",
metadata_filters=[
genai.MetadataFilter(
key="author",
conditions=[
genai.Condition(
operation=genai.Condition.Operator.EQUAL,
string_value="Arthur Schopenhauer",
)
],
)
],
results_count=1,
)
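# A minimal usage sketch mirroring the calls the mocked tests above exercise.
# Running it for real requires Google Generative AI credentials supplied via
# set_google_config; the corpus display name, node text and ids are
# placeholders.
def _example_google_vector_store() -> None:
    store = GoogleVectorStore.create_corpus(display_name="My first corpus")
    store.add(
        [
            TextNode(
                text="Hello my baby",
                relationships={
                    NodeRelationship.SOURCE: RelatedNodeInfo(
                        node_id="456",
                        metadata={"file_name": "Title for doc 456"},
                    )
                },
            )
        ]
    )
    store.delete(ref_doc_id="456")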
| [
"llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.create_corpus",
"llama_index.legacy.vector_stores.types.ExactMatchFilter",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config",
"llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.from_corpus",
"llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.class_name",
"llama_index.legacy.vector_stores.google.generativeai.set_google_config",
"llama_index.legacy.schema.RelatedNodeInfo"
] | [((855, 914), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (873, 914), False, 'import pytest\n'), ((916, 960), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credentials.Credentials')\n", (921, 960), False, 'from unittest.mock import MagicMock, patch\n'), ((1174, 1233), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1192, 1233), False, 'import pytest\n'), ((1235, 1309), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_corpus')\n", (1240, 1309), False, 'from unittest.mock import MagicMock, patch\n'), ((1915, 1974), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1933, 1974), False, 'import pytest\n'), ((1976, 2047), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (1981, 2047), False, 'from unittest.mock import MagicMock, patch\n'), ((2311, 2370), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (2329, 2370), False, 'import pytest\n'), ((2522, 2581), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (2540, 2581), False, 'import pytest\n'), ((2583, 2668), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks'\n )\n", (2588, 2668), False, 'from unittest.mock import MagicMock, patch\n'), ((2665, 2741), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_document')\n", (2670, 2741), False, 'from unittest.mock import MagicMock, patch\n'), ((2743, 2816), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_document')\n", (2748, 2816), False, 'from unittest.mock import MagicMock, patch\n'), ((2818, 2889), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (2823, 2889), False, 'from unittest.mock import MagicMock, patch\n'), ((7904, 7963), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (7922, 7963), False, 'import pytest\n'), ((7965, 8041), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.delete_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.delete_document')\n", (7970, 8041), False, 'from unittest.mock import MagicMock, patch\n'), ((8043, 8114), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), 
"('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (8048, 8114), False, 'from unittest.mock import MagicMock, patch\n'), ((8628, 8687), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (8646, 8687), False, 'import pytest\n'), ((8689, 8762), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.query_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.query_corpus')\n", (8694, 8762), False, 'from unittest.mock import MagicMock, patch\n'), ((8764, 8835), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (8769, 8835), False, 'from unittest.mock import MagicMock, patch\n'), ((732, 833), 'llama_index.legacy.vector_stores.google.generativeai.set_google_config', 'set_google_config', ([], {'api_endpoint': '"""No-such-endpoint-to-prevent-hitting-real-backend"""', 'testing': '(True)'}), "(api_endpoint=\n 'No-such-endpoint-to-prevent-hitting-real-backend', testing=True)\n", (749, 833), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((1030, 1082), 'llama_index.legacy.vector_stores.google.generativeai.set_google_config', 'set_google_config', ([], {'auth_credentials': 'mock_credentials'}), '(auth_credentials=mock_credentials)\n', (1047, 1082), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((1096, 1115), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config', 'genaix.get_config', ([], {}), '()\n', (1113, 1115), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((1577, 1640), 'llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.create_corpus', 'GoogleVectorStore.create_corpus', ([], {'display_name': '"""My first corpus"""'}), "(display_name='My first corpus')\n", (1608, 1640), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((2155, 2187), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (2167, 2187), True, 'import google.ai.generativelanguage as genai\n'), ((2211, 2257), 'llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.from_corpus', 'GoogleVectorStore.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (2240, 2257), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((2429, 2459), 'llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.class_name', 'GoogleVectorStore.class_name', ([], {}), '()\n', (2457, 2459), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((3367, 3399), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (3379, 3399), True, 'import google.ai.generativelanguage as genai\n'), ((3436, 3463), 'google.api_core.exceptions.NotFound', 'gapi_exception.NotFound', (['""""""'], {}), "('')\n", (3459, 3463), True, 'from google.api_core import exceptions as gapi_exception\n'), ((3504, 3552), 'google.ai.generativelanguage.Document', 'genai.Document', ([], {'name': 
'"""corpora/123/documents/456"""'}), "(name='corpora/123/documents/456')\n", (3518, 3552), True, 'import google.ai.generativelanguage as genai\n'), ((4023, 4069), 'llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.from_corpus', 'GoogleVectorStore.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (4052, 4069), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((8261, 8293), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (8273, 8293), True, 'import google.ai.generativelanguage as genai\n'), ((8317, 8363), 'llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.from_corpus', 'GoogleVectorStore.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (8346, 8363), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((8978, 9010), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (8990, 9010), True, 'import google.ai.generativelanguage as genai\n'), ((9410, 9456), 'llama_index.legacy.vector_stores.google.generativeai.GoogleVectorStore.from_corpus', 'GoogleVectorStore.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (9439, 9456), False, 'from llama_index.legacy.vector_stores.google.generativeai import GoogleVectorStore, set_google_config\n'), ((8524, 8601), 'google.ai.generativelanguage.DeleteDocumentRequest', 'genai.DeleteDocumentRequest', ([], {'name': '"""corpora/123/documents/doc-456"""', 'force': '(True)'}), "(name='corpora/123/documents/doc-456', force=True)\n", (8551, 8601), True, 'import google.ai.generativelanguage as genai\n'), ((3676, 3732), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': '"""corpora/123/documents/456/chunks/777"""'}), "(name='corpora/123/documents/456/chunks/777')\n", (3687, 3732), True, 'import google.ai.generativelanguage as genai\n'), ((3750, 3806), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': '"""corpora/123/documents/456/chunks/888"""'}), "(name='corpora/123/documents/456/chunks/888')\n", (3761, 3806), True, 'import google.ai.generativelanguage as genai\n'), ((3911, 3967), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': '"""corpora/123/documents/456/chunks/999"""'}), "(name='corpora/123/documents/456/chunks/999')\n", (3922, 3967), True, 'import google.ai.generativelanguage as genai\n'), ((4243, 4318), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '"""456"""', 'metadata': "{'file_name': 'Title for doc 456'}"}), "(node_id='456', metadata={'file_name': 'Title for doc 456'})\n", (4258, 4318), False, 'from llama_index.legacy.schema import NodeRelationship, RelatedNodeInfo, TextNode\n'), ((4606, 4681), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '"""456"""', 'metadata': "{'file_name': 'Title for doc 456'}"}), "(node_id='456', metadata={'file_name': 'Title for doc 456'})\n", (4621, 4681), False, 'from llama_index.legacy.schema import NodeRelationship, RelatedNodeInfo, TextNode\n'), ((4975, 5050), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '"""456"""', 'metadata': "{'file_name': 'Title for doc 456'}"}), "(node_id='456', metadata={'file_name': 'Title for doc 456'})\n", (4990, 5050), False, 'from llama_index.legacy.schema import NodeRelationship, 
RelatedNodeInfo, TextNode\n'), ((5743, 5814), 'google.ai.generativelanguage.CustomMetadata', 'genai.CustomMetadata', ([], {'key': '"""file_name"""', 'string_value': '"""Title for doc 456"""'}), "(key='file_name', string_value='Title for doc 456')\n", (5763, 5814), True, 'import google.ai.generativelanguage as genai\n'), ((9259, 9293), 'google.ai.generativelanguage.ChunkData', 'genai.ChunkData', ([], {'string_value': '"""42"""'}), "(string_value='42')\n", (9274, 9293), True, 'import google.ai.generativelanguage as genai\n'), ((9643, 9702), 'llama_index.legacy.vector_stores.types.ExactMatchFilter', 'ExactMatchFilter', ([], {'key': '"""author"""', 'value': '"""Arthur Schopenhauer"""'}), "(key='author', value='Arthur Schopenhauer')\n", (9659, 9702), False, 'from llama_index.legacy.vector_stores.types import ExactMatchFilter, MetadataFilters, VectorStoreQuery\n'), ((10252, 10350), 'google.ai.generativelanguage.Condition', 'genai.Condition', ([], {'operation': 'genai.Condition.Operator.EQUAL', 'string_value': '"""Arthur Schopenhauer"""'}), "(operation=genai.Condition.Operator.EQUAL, string_value=\n 'Arthur Schopenhauer')\n", (10267, 10350), True, 'import google.ai.generativelanguage as genai\n'), ((6413, 6458), 'google.ai.generativelanguage.ChunkData', 'genai.ChunkData', ([], {'string_value': '"""Hello my baby"""'}), "(string_value='Hello my baby')\n", (6428, 6458), True, 'import google.ai.generativelanguage as genai\n'), ((6869, 6915), 'google.ai.generativelanguage.ChunkData', 'genai.ChunkData', ([], {'string_value': '"""Hello my honey"""'}), "(string_value='Hello my honey')\n", (6884, 6915), True, 'import google.ai.generativelanguage as genai\n'), ((7571, 7623), 'google.ai.generativelanguage.ChunkData', 'genai.ChunkData', ([], {'string_value': '"""Hello my ragtime gal"""'}), "(string_value='Hello my ragtime gal')\n", (7586, 7623), True, 'import google.ai.generativelanguage as genai\n'), ((6522, 6577), 'google.ai.generativelanguage.CustomMetadata', 'genai.CustomMetadata', ([], {'key': '"""position"""', 'numeric_value': '(100)'}), "(key='position', numeric_value=100)\n", (6542, 6577), True, 'import google.ai.generativelanguage as genai\n'), ((6979, 7034), 'google.ai.generativelanguage.CustomMetadata', 'genai.CustomMetadata', ([], {'key': '"""position"""', 'numeric_value': '(200)'}), "(key='position', numeric_value=200)\n", (6999, 7034), True, 'import google.ai.generativelanguage as genai\n'), ((7687, 7742), 'google.ai.generativelanguage.CustomMetadata', 'genai.CustomMetadata', ([], {'key': '"""position"""', 'numeric_value': '(300)'}), "(key='position', numeric_value=300)\n", (7707, 7742), True, 'import google.ai.generativelanguage as genai\n')] |
"""
The core primitives for any language model interfacing. Docprompt uses these for the prompt garden, but
supports free conversion between these types and their equivalents in other libraries.
"""
from typing import Literal, Union, Optional
from pydantic import BaseModel, model_validator
class OpenAIImageURL(BaseModel):
url: str
class OpenAIComplexContent(BaseModel):
type: Literal["text", "image_url"]
text: Optional[str] = None
image_url: Optional[OpenAIImageURL] = None
@model_validator(mode="after")
def validate_content(cls, v):
if v.type == "text" and v.text is None:
raise ValueError("Text content must be provided when type is 'text'")
if v.type == "image_url" and v.image_url is None:
raise ValueError(
"Image URL content must be provided when type is 'image_url'"
)
if v.text is not None and v.image_url is not None:
raise ValueError("Only one of text or image_url can be provided")
return v
class OpenAIMessage(BaseModel):
role: Literal["system", "user", "assistant"]
content: Union[str, list[OpenAIComplexContent]]
def to_langchain_message(self):
try:
from langchain.schema import SystemMessage, HumanMessage, AIMessage
except ImportError:
raise ImportError(
"Could not import langchain.schema. Install with `docprompt[langchain]`"
)
role_mapping = {
"system": SystemMessage,
"user": HumanMessage,
"assistant": AIMessage,
}
dumped = self.model_dump(mode="json", exclude_unset=True, exclude_none=True)
return role_mapping[self.role](content=dumped["content"])
def to_openai(self):
return self.model_dump(mode="json", exclude_unset=True, exclude_none=True)
def to_llamaindex_chat_message(self):
try:
from llama_index.core.base.llms.types import ChatMessage, MessageRole
except ImportError:
raise ImportError(
"Could not import llama_index.core. Install with `docprompt[llamaindex]`"
)
role_mapping = {
"system": MessageRole.SYSTEM,
"user": MessageRole.USER,
"assistant": MessageRole.ASSISTANT,
}
dumped = self.model_dump(mode="json", exclude_unset=True, exclude_none=True)
return ChatMessage.from_str(
content=dumped["content"], role=role_mapping[self.role]
)
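# --- Usage sketch (editor's addition, hedged): a quick round-trip check of the
# primitives defined above. Only the plain-string content form is shown; the
# list[OpenAIComplexContent] form and the langchain/llama-index converters work
# the same way but require the optional extras. The example text is illustrative.
if __name__ == "__main__":
    message = OpenAIMessage(role="user", content="Summarize this document.")
    print(message.to_openai())  # -> {'role': 'user', 'content': 'Summarize this document.'}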
| [
"llama_index.core.base.llms.types.ChatMessage.from_str"
] | [((487, 516), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (502, 516), False, 'from pydantic import BaseModel, model_validator\n'), ((2416, 2493), 'llama_index.core.base.llms.types.ChatMessage.from_str', 'ChatMessage.from_str', ([], {'content': "dumped['content']", 'role': 'role_mapping[self.role]'}), "(content=dumped['content'], role=role_mapping[self.role])\n", (2436, 2493), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n')] |
#!/usr/bin/env python3
# Copyright (c) 2023 Steve Castellotti
# This file is part of Urcuchillay and is released under the MIT License.
# See LICENSE file in the project root for full license information.
import argparse
import logging
import os
import sys
import config
import utils
try:
import llama_index
import transformers
except ModuleNotFoundError as e:
print('\nError importing Python module(s)')
print('If installed using setup.sh it may be necessary to run:\n')
print('pyenv activate urcuchillay-env\n')
sys.exit(1)
DEFAULT_PROMPT = 'What is Urcuchillay?'
class Query:
def __init__(self, args):
self.debug = args.debug
data_path = args.data
model_path = args.path
storage_path = args.storage
load = args.load
save = args.save
enable_gpu = args.cpu
temperature = args.temperature
max_new_tokens = args.max_new_tokens
context_window = args.context
self.model_name = args.model
model_url = args.model_url
embed_model_name = args.embed_model_name
embed_model_provider = args.embed_model_provider
pretrained_model_name = args.pretrained_model_name
pretrained_model_provider = args.pretrained_model_provider
level = logging.DEBUG if self.debug else logging.INFO
logging.basicConfig(stream=sys.stdout, level=level)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
llama_debug = llama_index.callbacks.LlamaDebugHandler(print_trace_on_end=self.debug)
self.callback_manager = llama_index.callbacks.CallbackManager([llama_debug])
        # llama_index will automatically assume models are cached in a subdirectory of the current path named
        # "models", so we need to handle the case where a user explicitly included "models" at the end of --model_path
cache_directory = model_path
if os.path.basename(model_path) == 'models':
path = os.path.join(model_path, self.model_name)
cache_directory = os.path.dirname(model_path)
else:
path = os.path.join(model_path, 'models', self.model_name)
if os.path.exists(path):
model_url = 'file://' + str(path)
os.environ['LLAMA_INDEX_CACHE_DIR'] = cache_directory
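        # Example with hypothetical values (editor's addition): --path /opt/urcuchillay/models
        # and --model llama-2-13b.Q4_0.gguf resolve to the file
        # /opt/urcuchillay/models/llama-2-13b.Q4_0.gguf with LLAMA_INDEX_CACHE_DIR set to
        # /opt/urcuchillay; passing --path /opt/urcuchillay instead appends the 'models'
        # subdirectory automatically and yields the same locations.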
if pretrained_model_name is not None:
llama_index.set_global_tokenizer(
transformers.AutoTokenizer.from_pretrained(
pretrained_model_provider + '/' + pretrained_model_name
).encode
)
self.llm = llama_index.llms.LlamaCPP(
model_url=model_url,
model_path=None, # Note: setting a custom model_path here causes a fault
temperature=temperature,
max_new_tokens=max_new_tokens,
context_window=context_window,
generate_kwargs={}, # kwargs to pass to __call__()
model_kwargs={'n_gpu_layers': enable_gpu}, # kwargs to pass to __init__()
# transform inputs into Llama 2 format
messages_to_prompt=llama_index.llms.llama_utils.messages_to_prompt,
completion_to_prompt=llama_index.llms.llama_utils.completion_to_prompt,
verbose=self.debug,
)
if args.embed_model_name == 'local':
embed_model = args.embed_model_name
else:
# use Huggingface embeddings
embed_model = llama_index.embeddings.HuggingFaceEmbedding(
model_name=embed_model_provider + '/' + embed_model_name)
# create a service context
service_context = llama_index.ServiceContext.from_defaults(
llm=self.llm,
embed_model=embed_model,
callback_manager=self.callback_manager
)
if load:
# load vector index from storage
storage_context = llama_index.StorageContext.from_defaults(persist_dir=storage_path)
index = llama_index.load_index_from_storage(storage_context, service_context=service_context)
else:
# load documents
documents = llama_index.SimpleDirectoryReader(data_path).load_data()
# create vector store index
index = llama_index.VectorStoreIndex.from_documents(
documents, service_context=service_context
)
# persist the index to disk
if save:
index.storage_context.persist(persist_dir=storage_path)
# set up query engine
self.query_engine = index.as_query_engine()
def display_exchange(self, query):
print('Query: %s\n' % query)
if self.model_name in config.Models.MODELS.keys():
query = config.Models.MODELS[self.model_name]['prompt_template'].replace('{prompt}', query)
if self.debug:
print('Query (prompt): %s\n' % query)
response = self.query_engine.query(query)
print('Response: %s\n' % str(response).strip())
def parse_arguments():
parser = argparse.ArgumentParser(description='Process command parameters')
parser = utils.parse_arguments_common(parser)
parser.add_argument('-p', '--prompt', type=str, default=DEFAULT_PROMPT,
help='The prompt to process (default: %(default)s)')
parser.add_argument('--embed_model_name', type=str, default=config.Config.EMBED_MODEL_NAME,
help='The name of the embedding model to use (default: %(default)s)')
parser.add_argument('--embed_model_provider', type=str, default=None,
help='The provider of the embedding model to use (default: %(default)s)')
parser.add_argument('--pretrained_model_name', type=str, default=None,
help='The name of the pretrained model to use (default: %(default)s)')
parser.add_argument('--pretrained_model_provider', type=str, default=None,
help='The provider of the pretrained model to use (default: %(default)s)')
args = parser.parse_args()
args = utils.update_arguments_common(args)
return args
def main():
args = parse_arguments()
llm_query = Query(args=args)
llm_query.display_exchange(args.prompt)
if __name__ == '__main__':
main()
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.embeddings.HuggingFaceEmbedding",
"llama_index.llms.LlamaCPP",
"llama_index.callbacks.CallbackManager",
"llama_index.load_index_from_storage"
] | [((5042, 5107), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process command parameters"""'}), "(description='Process command parameters')\n", (5065, 5107), False, 'import argparse\n'), ((5121, 5157), 'utils.parse_arguments_common', 'utils.parse_arguments_common', (['parser'], {}), '(parser)\n', (5149, 5157), False, 'import utils\n'), ((6065, 6100), 'utils.update_arguments_common', 'utils.update_arguments_common', (['args'], {}), '(args)\n', (6094, 6100), False, 'import utils\n'), ((541, 552), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (549, 552), False, 'import sys\n'), ((1350, 1401), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'level'}), '(stream=sys.stdout, level=level)\n', (1369, 1401), False, 'import logging\n'), ((1506, 1576), 'llama_index.callbacks.LlamaDebugHandler', 'llama_index.callbacks.LlamaDebugHandler', ([], {'print_trace_on_end': 'self.debug'}), '(print_trace_on_end=self.debug)\n', (1545, 1576), False, 'import llama_index\n'), ((1609, 1661), 'llama_index.callbacks.CallbackManager', 'llama_index.callbacks.CallbackManager', (['[llama_debug]'], {}), '([llama_debug])\n', (1646, 1661), False, 'import llama_index\n'), ((2184, 2204), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2198, 2204), False, 'import os\n'), ((2603, 3000), 'llama_index.llms.LlamaCPP', 'llama_index.llms.LlamaCPP', ([], {'model_url': 'model_url', 'model_path': 'None', 'temperature': 'temperature', 'max_new_tokens': 'max_new_tokens', 'context_window': 'context_window', 'generate_kwargs': '{}', 'model_kwargs': "{'n_gpu_layers': enable_gpu}", 'messages_to_prompt': 'llama_index.llms.llama_utils.messages_to_prompt', 'completion_to_prompt': 'llama_index.llms.llama_utils.completion_to_prompt', 'verbose': 'self.debug'}), "(model_url=model_url, model_path=None, temperature\n =temperature, max_new_tokens=max_new_tokens, context_window=\n context_window, generate_kwargs={}, model_kwargs={'n_gpu_layers':\n enable_gpu}, messages_to_prompt=llama_index.llms.llama_utils.\n messages_to_prompt, completion_to_prompt=llama_index.llms.llama_utils.\n completion_to_prompt, verbose=self.debug)\n", (2628, 3000), False, 'import llama_index\n'), ((3636, 3760), 'llama_index.ServiceContext.from_defaults', 'llama_index.ServiceContext.from_defaults', ([], {'llm': 'self.llm', 'embed_model': 'embed_model', 'callback_manager': 'self.callback_manager'}), '(llm=self.llm, embed_model=\n embed_model, callback_manager=self.callback_manager)\n', (3676, 3760), False, 'import llama_index\n'), ((1441, 1481), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (1462, 1481), False, 'import logging\n'), ((1927, 1955), 'os.path.basename', 'os.path.basename', (['model_path'], {}), '(model_path)\n', (1943, 1955), False, 'import os\n'), ((1988, 2029), 'os.path.join', 'os.path.join', (['model_path', 'self.model_name'], {}), '(model_path, self.model_name)\n', (2000, 2029), False, 'import os\n'), ((2060, 2087), 'os.path.dirname', 'os.path.dirname', (['model_path'], {}), '(model_path)\n', (2075, 2087), False, 'import os\n'), ((2121, 2172), 'os.path.join', 'os.path.join', (['model_path', '"""models"""', 'self.model_name'], {}), "(model_path, 'models', self.model_name)\n", (2133, 2172), False, 'import os\n'), ((3455, 3560), 'llama_index.embeddings.HuggingFaceEmbedding', 'llama_index.embeddings.HuggingFaceEmbedding', ([], {'model_name': "(embed_model_provider + '/' + embed_model_name)"}), 
"(model_name=embed_model_provider +\n '/' + embed_model_name)\n", (3498, 3560), False, 'import llama_index\n'), ((3895, 3961), 'llama_index.StorageContext.from_defaults', 'llama_index.StorageContext.from_defaults', ([], {'persist_dir': 'storage_path'}), '(persist_dir=storage_path)\n', (3935, 3961), False, 'import llama_index\n'), ((3982, 4072), 'llama_index.load_index_from_storage', 'llama_index.load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=\n service_context)\n', (4017, 4072), False, 'import llama_index\n'), ((4252, 4344), 'llama_index.VectorStoreIndex.from_documents', 'llama_index.VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=\n service_context)\n', (4295, 4344), False, 'import llama_index\n'), ((4683, 4710), 'config.Models.MODELS.keys', 'config.Models.MODELS.keys', ([], {}), '()\n', (4708, 4710), False, 'import config\n'), ((1410, 1429), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1427, 1429), False, 'import logging\n'), ((2424, 2527), 'transformers.AutoTokenizer.from_pretrained', 'transformers.AutoTokenizer.from_pretrained', (["(pretrained_model_provider + '/' + pretrained_model_name)"], {}), "(pretrained_model_provider + '/' +\n pretrained_model_name)\n", (2466, 2527), False, 'import transformers\n'), ((4135, 4179), 'llama_index.SimpleDirectoryReader', 'llama_index.SimpleDirectoryReader', (['data_path'], {}), '(data_path)\n', (4168, 4179), False, 'import llama_index\n')] |
import gradio as gr
from dotenv import load_dotenv
from prompts import context
#from note_engine import note_engine
import llama_index
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
Settings,
StorageContext,
load_index_from_storage,
)
from pymilvus import connections
from llama_index.vector_stores.milvus import MilvusVectorStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
Settings.embed_model = embed_model
load_dotenv()
import logging
import sys
logging.basicConfig(stream=sys.stderr)
llama_index.core.set_global_handler("simple")
try:
vector_store_books = MilvusVectorStore(dim=384, collection_name="books")
storage_context = StorageContext.from_defaults(
persist_dir="./storage/books",
vector_store=vector_store_books,
)
books_index = load_index_from_storage(storage_context)
except Exception as error:
print(f'Unable to load index from storage: {error}')
print('Indexing book dataset')
vector_store_books = MilvusVectorStore(dim=384, collection_name="books", overwrite=True)
book_docs = SimpleDirectoryReader(input_dir="./data").load_data()
storage_context = StorageContext.from_defaults(
vector_store=vector_store_books,
)
books_index = VectorStoreIndex.from_documents(book_docs, storage_context=storage_context)
books_index.storage_context.persist(persist_dir="./storage/books")
books_query_engine = books_index.as_query_engine(similarity_top_k=3)
tools = [
QueryEngineTool(
query_engine=books_query_engine,
metadata=ToolMetadata(
name="books_data",
description="Provides information about known books; ONLY books known to this tool should be considered when answering questions about books",
),
),
]
# # This is the main agent
llm = OpenAI(model="gpt-3.5-turbo-0613")
agent = ReActAgent.from_tools(tools, llm=llm, verbose=True, context=context)
# UI for demo
def chat_interface(prompt):
# Send the prompt to the agent and get the response
response = agent.query(prompt)
print(response)
return response
iface = gr.Interface(fn=chat_interface,
inputs="text",
outputs="text",
allow_flagging="never")
iface.launch(share=True)
| [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.agent.ReActAgent.from_tools",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.core.set_global_handler",
"llama_index.vector_stores.milvus.MilvusVectorStore",
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.openai.OpenAI",
"llama_index.core.tools.ToolMetadata",
"llama_index.core.load_index_from_storage"
] | [((613, 670), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""BAAI/bge-small-en-v1.5"""'}), "(model_name='BAAI/bge-small-en-v1.5')\n", (633, 670), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((707, 720), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (718, 720), False, 'from dotenv import load_dotenv\n'), ((749, 787), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stderr'}), '(stream=sys.stderr)\n', (768, 787), False, 'import logging\n'), ((788, 833), 'llama_index.core.set_global_handler', 'llama_index.core.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (823, 833), False, 'import llama_index\n'), ((2074, 2108), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo-0613"""'}), "(model='gpt-3.5-turbo-0613')\n", (2080, 2108), False, 'from llama_index.llms.openai import OpenAI\n'), ((2117, 2185), 'llama_index.core.agent.ReActAgent.from_tools', 'ReActAgent.from_tools', (['tools'], {'llm': 'llm', 'verbose': '(True)', 'context': 'context'}), '(tools, llm=llm, verbose=True, context=context)\n', (2138, 2185), False, 'from llama_index.core.agent import ReActAgent\n'), ((2370, 2460), 'gradio.Interface', 'gr.Interface', ([], {'fn': 'chat_interface', 'inputs': '"""text"""', 'outputs': '"""text"""', 'allow_flagging': '"""never"""'}), "(fn=chat_interface, inputs='text', outputs='text',\n allow_flagging='never')\n", (2382, 2460), True, 'import gradio as gr\n'), ((865, 916), 'llama_index.vector_stores.milvus.MilvusVectorStore', 'MilvusVectorStore', ([], {'dim': '(384)', 'collection_name': '"""books"""'}), "(dim=384, collection_name='books')\n", (882, 916), False, 'from llama_index.vector_stores.milvus import MilvusVectorStore\n'), ((939, 1036), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage/books"""', 'vector_store': 'vector_store_books'}), "(persist_dir='./storage/books', vector_store=\n vector_store_books)\n", (967, 1036), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings, StorageContext, load_index_from_storage\n'), ((1073, 1113), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1096, 1113), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings, StorageContext, load_index_from_storage\n'), ((1258, 1325), 'llama_index.vector_stores.milvus.MilvusVectorStore', 'MilvusVectorStore', ([], {'dim': '(384)', 'collection_name': '"""books"""', 'overwrite': '(True)'}), "(dim=384, collection_name='books', overwrite=True)\n", (1275, 1325), False, 'from llama_index.vector_stores.milvus import MilvusVectorStore\n'), ((1418, 1479), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store_books'}), '(vector_store=vector_store_books)\n', (1446, 1479), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings, StorageContext, load_index_from_storage\n'), ((1513, 1588), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['book_docs'], {'storage_context': 'storage_context'}), '(book_docs, storage_context=storage_context)\n', (1544, 1588), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings, StorageContext, load_index_from_storage\n'), ((1820, 2004), 'llama_index.core.tools.ToolMetadata', 
'ToolMetadata', ([], {'name': '"""books_data"""', 'description': '"""Provides information about known books; ONLY books known to this tool should be considered when answering questions about books"""'}), "(name='books_data', description=\n 'Provides information about known books; ONLY books known to this tool should be considered when answering questions about books'\n )\n", (1832, 2004), False, 'from llama_index.core.tools import QueryEngineTool, ToolMetadata\n'), ((1342, 1383), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '"""./data"""'}), "(input_dir='./data')\n", (1363, 1383), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings, StorageContext, load_index_from_storage\n')] |
from typing import Any, List, Optional, Sequence
from llama_index.core.prompts.prompt_utils import get_biggest_prompt
from llama_index.core.response_synthesizers.refine import Refine
from llama_index.core.types import RESPONSE_TEXT_TYPE
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
class CompactAndRefine(Refine):
"""Refine responses across compact text chunks."""
@dispatcher.span
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
prev_response: Optional[RESPONSE_TEXT_TYPE] = None,
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
compact_texts = self._make_compact_text_chunks(query_str, text_chunks)
return await super().aget_response(
query_str=query_str,
text_chunks=compact_texts,
prev_response=prev_response,
**response_kwargs,
)
@dispatcher.span
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
prev_response: Optional[RESPONSE_TEXT_TYPE] = None,
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Get compact response."""
        # use the prompt helper to fit compact text_chunks under the prompt limitation
        # TODO: This is a temporary fix - the reason it's temporary is that
        # the refine template does not account for the size of the previous answer.
new_texts = self._make_compact_text_chunks(query_str, text_chunks)
return super().get_response(
query_str=query_str,
text_chunks=new_texts,
prev_response=prev_response,
**response_kwargs,
)
def _make_compact_text_chunks(
self, query_str: str, text_chunks: Sequence[str]
) -> List[str]:
text_qa_template = self._text_qa_template.partial_format(query_str=query_str)
refine_template = self._refine_template.partial_format(query_str=query_str)
max_prompt = get_biggest_prompt([text_qa_template, refine_template])
return self._prompt_helper.repack(max_prompt, text_chunks)
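# Usage sketch (editor's addition, hedged): this synthesizer is commonly obtained
# through the response-synthesizer factory rather than constructed directly, e.g.
#   from llama_index.core import get_response_synthesizer
#   from llama_index.core.response_synthesizers import ResponseMode
#   synthesizer = get_response_synthesizer(response_mode=ResponseMode.COMPACT)
#   answer = synthesizer.get_response("What does the report conclude?", text_chunks)
# where `text_chunks` is a list of retrieved text strings; the query string and
# variable names above are illustrative only.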
| [
"llama_index.core.prompts.prompt_utils.get_biggest_prompt",
"llama_index.core.instrumentation.get_dispatcher"
] | [((306, 341), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (331, 341), True, 'import llama_index.core.instrumentation as instrument\n'), ((2027, 2082), 'llama_index.core.prompts.prompt_utils.get_biggest_prompt', 'get_biggest_prompt', (['[text_qa_template, refine_template]'], {}), '([text_qa_template, refine_template])\n', (2045, 2082), False, 'from llama_index.core.prompts.prompt_utils import get_biggest_prompt\n')] |
import llama_index, os
import dill as pickle # dill is a more powerful version of pickle
from llama_index import ServiceContext, StorageContext
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
load_dotenv('app/.env')
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0
)
llm_embeddings = OpenAIEmbeddings()
service_context = ServiceContext.from_defaults(
llm=llm, embed_model=llm_embeddings
)
llama_index.set_global_service_context(service_context)
# The other computational tasks
ccel_storage_context = StorageContext.from_defaults(persist_dir='app/systematic_theology')
# if precomputed_results directory doesn't exist, create it
if not os.path.exists('precomputed_results'):
os.makedirs('precomputed_results')
# Serialize with dill
with open('precomputed_results/ccel_storage_context.pkl', 'wb') as f:
    pickle.dump(ccel_storage_context, f)
| [
"llama_index.set_global_service_context",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults"
] | [((271, 294), 'dotenv.load_dotenv', 'load_dotenv', (['"""app/.env"""'], {}), "('app/.env')\n", (282, 294), False, 'from dotenv import load_dotenv\n'), ((313, 345), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (327, 345), False, 'import llama_index, os\n'), ((353, 406), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (363, 406), False, 'from langchain.chat_models import ChatOpenAI\n'), ((436, 454), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (452, 454), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((474, 539), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'llm_embeddings'}), '(llm=llm, embed_model=llm_embeddings)\n', (502, 539), False, 'from llama_index import ServiceContext, StorageContext\n'), ((547, 602), 'llama_index.set_global_service_context', 'llama_index.set_global_service_context', (['service_context'], {}), '(service_context)\n', (585, 602), False, 'import llama_index, os\n'), ((659, 726), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""app/systematic_theology"""'}), "(persist_dir='app/systematic_theology')\n", (687, 726), False, 'from llama_index import ServiceContext, StorageContext\n'), ((795, 832), 'os.path.exists', 'os.path.exists', (['"""precomputed_results"""'], {}), "('precomputed_results')\n", (809, 832), False, 'import llama_index, os\n'), ((838, 872), 'os.makedirs', 'os.makedirs', (['"""precomputed_results"""'], {}), "('precomputed_results')\n", (849, 872), False, 'import llama_index, os\n'), ((970, 1006), 'dill.dump', 'pickle.dump', (['ccel_storage_context', 'f'], {}), '(ccel_storage_context, f)\n', (981, 1006), True, 'import dill as pickle\n')] |
import os
import pickle
from dotenv import load_dotenv
import llama_index
from langchain import OpenAI
from langchain.embeddings import OpenAIEmbeddings
from llama_index import LLMPredictor, ServiceContext
from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTVectorStoreIndex
class IndexChatBot:
def __init__(self, project_name : str, artifact_folder : str) -> None:
self.story_summary_path = os.path.join(artifact_folder, project_name, "story_summary.pkl")
self.story_summary = pickle.load(
open(self.story_summary_path, "rb")
)
chunks = self.story_summary['summary_chunks']
self.documents = [llama_index.Document(t) for t in chunks]
load_dotenv(dotenv_path='.env/openai.env')
try:
_openai_key = os.getenv("OPENAI_API_KEY")
self.llm_predictor = LLMPredictor(
llm = OpenAI(openai_api_key=_openai_key, temperature=0),
)
openai_embedding = OpenAIEmbeddings(openai_api_key=_openai_key)
self.embed_model = LangchainEmbedding(openai_embedding)
self.service_context = ServiceContext.from_defaults(
llm_predictor=self.llm_predictor,embed_model=self.embed_model
)
self.vector_store_index = GPTVectorStoreIndex.from_documents(
self.documents, service_context=self.service_context
)
except Exception as e:
print(f"Certain exception occured as {e}")
self.query_engine = self.vector_store_index.as_query_engine()
print("=> Everything loaded successfully")
def get_response(self, query : str) -> str:
        return str(self.query_engine.query(query))
| [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.Document",
"llama_index.LangchainEmbedding"
] | [((426, 490), 'os.path.join', 'os.path.join', (['artifact_folder', 'project_name', '"""story_summary.pkl"""'], {}), "(artifact_folder, project_name, 'story_summary.pkl')\n", (438, 490), False, 'import os\n'), ((721, 763), 'dotenv.load_dotenv', 'load_dotenv', ([], {'dotenv_path': '""".env/openai.env"""'}), "(dotenv_path='.env/openai.env')\n", (732, 763), False, 'from dotenv import load_dotenv\n'), ((671, 694), 'llama_index.Document', 'llama_index.Document', (['t'], {}), '(t)\n', (691, 694), False, 'import llama_index\n'), ((803, 830), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (812, 830), False, 'import os\n'), ((996, 1040), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': '_openai_key'}), '(openai_api_key=_openai_key)\n', (1012, 1040), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1072, 1108), 'llama_index.LangchainEmbedding', 'LangchainEmbedding', (['openai_embedding'], {}), '(openai_embedding)\n', (1090, 1108), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTVectorStoreIndex\n'), ((1145, 1242), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'self.llm_predictor', 'embed_model': 'self.embed_model'}), '(llm_predictor=self.llm_predictor, embed_model=\n self.embed_model)\n', (1173, 1242), False, 'from llama_index import LLMPredictor, ServiceContext\n'), ((1306, 1399), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['self.documents'], {'service_context': 'self.service_context'}), '(self.documents, service_context=self.\n service_context)\n', (1340, 1399), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTVectorStoreIndex\n'), ((900, 949), 'langchain.OpenAI', 'OpenAI', ([], {'openai_api_key': '_openai_key', 'temperature': '(0)'}), '(openai_api_key=_openai_key, temperature=0)\n', (906, 949), False, 'from langchain import OpenAI\n')] |
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import os
from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
bulk_data = True
# ----------------------------------
async def connect_to_elasticsearch():
# Instantiate the Elasticsearch client right away to check we can connect
from elasticsearch import AsyncElasticsearch
es_client = AsyncElasticsearch(
[os.getenv("ES_URL")],
ssl_assert_fingerprint=os.getenv("ES_CERTIFICATE_FINGERPRINT"),
basic_auth=(os.getenv("ES_USERNAME"), os.getenv("ES_PASSWORD"))
)
await es_client.info() # this connects to the cluster and gets its version
if bulk_data:
await es_client.indices.delete(index=os.getenv("ES_DEFAULT_INDEX"), ignore=[400, 404])
return es_client
def load_data(es_client):
from llama_index import SimpleDirectoryReader
# Creates a reader for the /data folder
if bulk_data:
documents = SimpleDirectoryReader("python/data").load_data(show_progress=True)
# Creates the ES vector store
from llama_index.vector_stores import ElasticsearchStore
ES_DEFAULT_INDEX = os.getenv("ES_DEFAULT_INDEX")
es_vector_store = ElasticsearchStore(
index_name=ES_DEFAULT_INDEX,
es_client=es_client ,
)
# Service ctx for debug
from llama_index import ServiceContext
from llama_index.llms import OpenAI
llm = OpenAI(model="gpt-3.5-turbo", temperature=0)
from llama_index.embeddings import HuggingFaceEmbedding
embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
service_context = ServiceContext.from_defaults(
# callback_manager=callback_manager,
llm=llm,
embed_model=embed_model
)
# Creates the index
import llama_index
llama_index.set_global_handler("simple")
from llama_index import VectorStoreIndex
from llama_index.storage.storage_context import StorageContext
storage_context = StorageContext.from_defaults(vector_store=es_vector_store)
if bulk_data:
index = VectorStoreIndex.from_documents(
documents,
storage_context=storage_context,
service_context=service_context
)
else:
index = VectorStoreIndex.from_vector_store(
vector_store=es_vector_store,
service_context=service_context)
from llama_hub.youtube_transcript import YoutubeTranscriptReader
loader = YoutubeTranscriptReader()
documents = loader.load_data(ytlinks=['https://www.youtube.com/watch?v=i3OYlaoj-BM'])
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, service_context=service_context
)
return index
async def main():
es_client = await connect_to_elasticsearch()
index = load_data(es_client)
# set Logging to DEBUG for more detailed outputs
query_engine = index.as_query_engine()
# What is Prince and what can you tell me about Hyphenation?
while (True):
question = input("Enter your question: ")
if question == "":
question = "what is the address of the bank of yes logic?"
response = query_engine.query(question)
print("**************************** REFERENCES ****************************")
print("Refs " + str(response.source_nodes))
print("**************************** Q&A ****************************")
print("Q: " + question)
print("A: " + str(response))
import asyncio
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.set_global_handler",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.llms.OpenAI",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.embeddings.HuggingFaceEmbedding",
"llama_index.vector_stores.ElasticsearchStore"
] | [((27, 86), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (46, 86), False, 'import logging\n'), ((202, 215), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (213, 215), False, 'from dotenv import load_dotenv\n'), ((234, 261), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (243, 261), False, 'import os\n'), ((3747, 3771), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3769, 3771), False, 'import asyncio\n'), ((118, 158), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (139, 158), False, 'import logging\n'), ((1284, 1313), 'os.getenv', 'os.getenv', (['"""ES_DEFAULT_INDEX"""'], {}), "('ES_DEFAULT_INDEX')\n", (1293, 1313), False, 'import os\n'), ((1337, 1405), 'llama_index.vector_stores.ElasticsearchStore', 'ElasticsearchStore', ([], {'index_name': 'ES_DEFAULT_INDEX', 'es_client': 'es_client'}), '(index_name=ES_DEFAULT_INDEX, es_client=es_client)\n', (1355, 1405), False, 'from llama_index.vector_stores import ElasticsearchStore\n'), ((1566, 1610), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model='gpt-3.5-turbo', temperature=0)\n", (1572, 1610), False, 'from llama_index.llms import OpenAI\n'), ((1698, 1771), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L6-v2"""'}), "(model_name='sentence-transformers/all-MiniLM-L6-v2')\n", (1718, 1771), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((1795, 1857), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (1823, 1857), False, 'from llama_index import ServiceContext\n'), ((1978, 2018), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (2008, 2018), False, 'import llama_index\n'), ((2155, 2213), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'es_vector_store'}), '(vector_store=es_vector_store)\n', (2183, 2213), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((2656, 2681), 'llama_hub.youtube_transcript.YoutubeTranscriptReader', 'YoutubeTranscriptReader', ([], {}), '()\n', (2679, 2681), False, 'from llama_hub.youtube_transcript import YoutubeTranscriptReader\n'), ((2785, 2897), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(documents, storage_context=storage_context,\n service_context=service_context)\n', (2816, 2897), False, 'from llama_index import VectorStoreIndex\n'), ((87, 106), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (104, 106), False, 'import logging\n'), ((2253, 2365), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(documents, storage_context=storage_context,\n service_context=service_context)\n', (2284, 2365), False, 'from llama_index import VectorStoreIndex\n'), ((2436, 2537), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'es_vector_store', 
'service_context': 'service_context'}), '(vector_store=es_vector_store,\n service_context=service_context)\n', (2470, 2537), False, 'from llama_index import VectorStoreIndex\n'), ((529, 548), 'os.getenv', 'os.getenv', (['"""ES_URL"""'], {}), "('ES_URL')\n", (538, 548), False, 'import os\n'), ((582, 621), 'os.getenv', 'os.getenv', (['"""ES_CERTIFICATE_FINGERPRINT"""'], {}), "('ES_CERTIFICATE_FINGERPRINT')\n", (591, 621), False, 'import os\n'), ((643, 667), 'os.getenv', 'os.getenv', (['"""ES_USERNAME"""'], {}), "('ES_USERNAME')\n", (652, 667), False, 'import os\n'), ((669, 693), 'os.getenv', 'os.getenv', (['"""ES_PASSWORD"""'], {}), "('ES_PASSWORD')\n", (678, 693), False, 'import os\n'), ((1098, 1134), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""python/data"""'], {}), "('python/data')\n", (1119, 1134), False, 'from llama_index import SimpleDirectoryReader\n'), ((856, 885), 'os.getenv', 'os.getenv', (['"""ES_DEFAULT_INDEX"""'], {}), "('ES_DEFAULT_INDEX')\n", (865, 885), False, 'import os\n')] |
import argparse
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
ServiceContext,
)
from llama_index.llms import LlamaCPP
# use Huggingface embeddings
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt
from llama_index import StorageContext, load_index_from_storage
from llama_index.prompts import PromptTemplate
from llama_index.llms import ChatMessage, MessageRole
from llama_index.indices.vector_store.retrievers import VectorIndexRetriever
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine
import llama_index.response_synthesizers
parser = argparse.ArgumentParser()
parser.add_argument("--language_model_path", type=str)
parser.add_argument("--embed_model_name", type=str)
parser.add_argument("--context_window", type=int, default=3900)
parser.add_argument("--max_new_tokens", type=int, default=256)
parser.add_argument("--n_gpu_layers", type=int)
# default_prompt = PromptTemplate("""\
# Given a conversation (between Human and Assistant) and a follow up message from Human, \
# rewrite the message to be a standalone question that captures all relevant context \
# from the conversation.
# <Chat History>
# {chat_history}
# <Follow Up Message>
# {question}
# <Standalone question>
# """)
# # list of `ChatMessage` objects
# custom_chat_history = [
# ChatMessage(
# role=MessageRole.USER,
# content='Hello assistant, we are having a insightful discussion about Paul Graham today.'
# ),
# ChatMessage(
# role=MessageRole.ASSISTANT,
# content='Okay, sounds good.'
# )
# ]
args = parser.parse_args()
llm = LlamaCPP(
# You can pass in the URL to a GGML model to download it automatically
model_url=None,
# optionally, you can set the path to a pre-downloaded model instead of model_url
model_path=args.language_model_path,
temperature=0.1,
max_new_tokens=args.max_new_tokens,
# llama2 has a context window of 4096 tokens, but we set it lower to allow for some wiggle room
context_window=args.context_window,
# kwargs to pass to __call__()
generate_kwargs={},
# kwargs to pass to __init__()
# set to at least 1 to use GPU
model_kwargs={"n_gpu_layers": args.n_gpu_layers},
# transform inputs into Llama2 format
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
verbose=True,
)
embed_model = HuggingFaceEmbedding(model_name=args.embed_model_name)
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model
)
documents = SimpleDirectoryReader('data2').load_data()
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
query_engine = index.as_query_engine()
# chat_engine = CondenseQuestionChatEngine.from_defaults(
# query_engine=query_engine,
# condense_question_prompt=custom_prompt,
# chat_history=custom_chat_history,
# service_context=service_context,
# verbose=True
# )
chat_engine = index.as_chat_engine(
chat_mode='react',
verbose=True
)
while True:
question = input("What is your question?")
if question == "reset":
chat_engine.reset()
continue
    print(chat_engine.chat(question))
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.embeddings.HuggingFaceEmbedding",
"llama_index.llms.LlamaCPP"
] | [((693, 718), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (716, 718), False, 'import argparse\n'), ((1711, 2046), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_url': 'None', 'model_path': 'args.language_model_path', 'temperature': '(0.1)', 'max_new_tokens': 'args.max_new_tokens', 'context_window': 'args.context_window', 'generate_kwargs': '{}', 'model_kwargs': "{'n_gpu_layers': args.n_gpu_layers}", 'messages_to_prompt': 'messages_to_prompt', 'completion_to_prompt': 'completion_to_prompt', 'verbose': '(True)'}), "(model_url=None, model_path=args.language_model_path, temperature=\n 0.1, max_new_tokens=args.max_new_tokens, context_window=args.\n context_window, generate_kwargs={}, model_kwargs={'n_gpu_layers': args.\n n_gpu_layers}, messages_to_prompt=messages_to_prompt,\n completion_to_prompt=completion_to_prompt, verbose=True)\n", (1719, 2046), False, 'from llama_index.llms import LlamaCPP\n'), ((2493, 2547), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'args.embed_model_name'}), '(model_name=args.embed_model_name)\n', (2513, 2547), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((2568, 2630), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (2596, 2630), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext\n'), ((2704, 2779), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (2735, 2779), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext\n'), ((2653, 2683), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data2"""'], {}), "('data2')\n", (2674, 2683), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext\n')] |
"""Global eval handlers."""
from typing import Any
from llama_index.callbacks.argilla_callback import argilla_callback_handler
from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.deepeval_callback import deepeval_callback_handler
from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler
from llama_index.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index
llama_index.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
elif eval_mode == "argilla":
handler = argilla_callback_handler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
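# Usage sketch (editor's addition, hedged): these helpers back
# `llama_index.set_global_handler(...)`, e.g.
#   import llama_index
#   llama_index.set_global_handler("simple")  # installs SimpleLLMHandler globally
# Any handler-specific keyword arguments are forwarded unchanged via **eval_params.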
| [
"llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.deepeval_callback.deepeval_callback_handler",
"llama_index.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.callbacks.argilla_callback.argilla_callback_handler",
"llama_index.callbacks.promptlayer_handler.PromptLayerHandler"
] | [((1144, 1179), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1164, 1179), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1237, 1280), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1265, 1280), False, 'from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1338, 1383), 'llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1368, 1383), False, 'from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1437, 1478), 'llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1463, 1478), False, 'from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1534, 1567), 'llama_index.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1552, 1567), False, 'from llama_index.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1620, 1660), 'llama_index.callbacks.deepeval_callback.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (1645, 1660), False, 'from llama_index.callbacks.deepeval_callback import deepeval_callback_handler\n'), ((1711, 1742), 'llama_index.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1727, 1742), False, 'from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler\n'), ((1794, 1833), 'llama_index.callbacks.argilla_callback.argilla_callback_handler', 'argilla_callback_handler', ([], {}), '(**eval_params)\n', (1818, 1833), False, 'from llama_index.callbacks.argilla_callback import argilla_callback_handler\n')] |
import os
import glob
import llama_index
from llama_index.core import ServiceContext
from llama_index.llms.gemini import Gemini
from llama_index.core import SimpleDirectoryReader
from llama_index.core.response_synthesizers import TreeSummarize
MODEL = "Gemini"
DATA_DIR = "data"
SUMMARY_ROOT = "summaries"
SUMMARY_DIR = os.path.join(SUMMARY_ROOT, MODEL).replace(":", "_")
os.makedirs(SUMMARY_DIR, exist_ok=True)
def saveText(path, text):
"Save the given text to a file at the specified path."
with open(path, "w") as f:
f.write(text)
def commentPaths(ticketNumber):
"Returns a sorted list of file paths for the comments in Zendesk ticket `ticketNumber`."
ticketDir = os.path.join(DATA_DIR, ticketNumber)
return sorted(glob.glob(os.path.join(ticketDir, "*.txt")))
def summaryPath(ticketNumber):
"Returns the file path for where we store the summary of Zendesk ticket `ticketNumber`."
return os.path.join(SUMMARY_DIR, f"{ticketNumber}.txt")
def totalSizeKB(paths):
"Returns the total size in kilobytes of the files specified by `paths`."
return sum(os.path.getsize(path) for path in paths) / 1024
def currentTime():
"Returns the current time in the format 'dd/mm/YYYY HH:MM:SS'."
from datetime import datetime
now = datetime.now()
return now.strftime("%d/%m/%Y %H:%M:%S")
llm = Gemini()
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
summarizer = TreeSummarize(service_context=service_context, verbose=False)
SUMMARY_PROMPT = "The following text is a series of messages from a PaperCut support ticket. Summarise the whole conversation, including a list of participants and who they work for, the problem or problems, the key events and the dates they occurred, and the current status of the ticket. Include any log lines from the messages."
def summariseTicket(ticketNumber):
"Summarizes the Zendesk ticket with the given `ticketNumber` and returns the summary text."
input_files = commentPaths(ticketNumber)
reader = SimpleDirectoryReader(input_files=input_files)
docs = reader.load_data()
texts = [doc.text for doc in docs]
return summarizer.get_response(SUMMARY_PROMPT, texts)
#
# Test case.
#
if __name__ == "__main__":
import time
print(f"MODEL={MODEL}")
ticketNumbers = sorted(os.path.basename(path) for path in glob.glob(os.path.join(DATA_DIR, "*")))
ticketNumbers.sort(key=lambda k: (totalSizeKB(commentPaths(k)), k))
print(ticketNumbers)
for i, ticketNumber in enumerate(ticketNumbers):
paths = commentPaths(ticketNumber)
print(f"{i:4}: {ticketNumber:8} {len(paths):3} comments {totalSizeKB(paths):7.3f} kb")
# ticketNumbers = ticketNumbers[:1]
t00 = time.time()
summaries = {}
durations = {}
commentCounts = {}
commentSizes = {}
for i, ticketNumber in enumerate(ticketNumbers):
commentCount = len(commentPaths(ticketNumber))
commentSize = totalSizeKB(commentPaths(ticketNumber))
print(f"{i:2}: ticketNumber={ticketNumber:8} {commentCount:3} comments {commentSize:7.3f} kb {currentTime()}",
flush=True)
if os.path.exists(summaryPath(ticketNumber)):
print(f"Skipping ticket {ticketNumber}", flush=True)
continue # Skip tickets that have already been summarised.
t0 = time.time()
summary = summariseTicket(ticketNumber)
duration = time.time() - t0
description = f"{commentCount} comments {commentSize:7.3f} kb {duration:5.2f} sec summary={len(summary)}"
print(f" {description}", flush=True)
with open(summaryPath(ticketNumber), "w") as f:
print(f"Summary: ticket {ticketNumber}: {description} -------------------------", file=f)
print(summary, file=f)
summaries[ticketNumber] = summary
durations[ticketNumber] = duration
commentCounts[ticketNumber] = commentCount
commentSizes[ticketNumber] = commentSize
duration = time.time() - t00
print("====================^^^====================")
print(f"Duration: {duration:.2f} seconds")
for i, ticketNumber in enumerate(ticketNumbers):
commentCount = commentCounts[ticketNumber]
commentSize = totalSizeKB(commentPaths(ticketNumber))
duration = durations[ticketNumber]
print(f"{i:2}: {ticketNumber:8}: {commentCount:3} comments {commentSize:7.3f} kb {duration:5.2f} seconds")
| [
"llama_index.core.response_synthesizers.TreeSummarize",
"llama_index.llms.gemini.Gemini",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.ServiceContext.from_defaults"
] | [((374, 413), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIR'], {'exist_ok': '(True)'}), '(SUMMARY_DIR, exist_ok=True)\n', (385, 413), False, 'import os\n'), ((1344, 1352), 'llama_index.llms.gemini.Gemini', 'Gemini', ([], {}), '()\n', (1350, 1352), False, 'from llama_index.llms.gemini import Gemini\n'), ((1371, 1429), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (1399, 1429), False, 'from llama_index.core import ServiceContext\n'), ((1443, 1504), 'llama_index.core.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context', 'verbose': '(False)'}), '(service_context=service_context, verbose=False)\n', (1456, 1504), False, 'from llama_index.core.response_synthesizers import TreeSummarize\n'), ((695, 731), 'os.path.join', 'os.path.join', (['DATA_DIR', 'ticketNumber'], {}), '(DATA_DIR, ticketNumber)\n', (707, 731), False, 'import os\n'), ((931, 979), 'os.path.join', 'os.path.join', (['SUMMARY_DIR', 'f"""{ticketNumber}.txt"""'], {}), "(SUMMARY_DIR, f'{ticketNumber}.txt')\n", (943, 979), False, 'import os\n'), ((1277, 1291), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1289, 1291), False, 'from datetime import datetime\n'), ((2022, 2068), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'input_files'}), '(input_files=input_files)\n', (2043, 2068), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((2726, 2737), 'time.time', 'time.time', ([], {}), '()\n', (2735, 2737), False, 'import time\n'), ((321, 354), 'os.path.join', 'os.path.join', (['SUMMARY_ROOT', 'MODEL'], {}), '(SUMMARY_ROOT, MODEL)\n', (333, 354), False, 'import os\n'), ((3342, 3353), 'time.time', 'time.time', ([], {}), '()\n', (3351, 3353), False, 'import time\n'), ((3995, 4006), 'time.time', 'time.time', ([], {}), '()\n', (4004, 4006), False, 'import time\n'), ((760, 792), 'os.path.join', 'os.path.join', (['ticketDir', '"""*.txt"""'], {}), "(ticketDir, '*.txt')\n", (772, 792), False, 'import os\n'), ((2312, 2334), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2328, 2334), False, 'import os\n'), ((3421, 3432), 'time.time', 'time.time', ([], {}), '()\n', (3430, 3432), False, 'import time\n'), ((1097, 1118), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (1112, 1118), False, 'import os\n'), ((2357, 2384), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""*"""'], {}), "(DATA_DIR, '*')\n", (2369, 2384), False, 'import os\n')] |
import llama_index
import weaviate
from importlib.metadata import version
print(f"LlamaIndex version: {version('llama_index')}")
print(f"Weaviate version: {version('weaviate-client')}")
# Load API key from .env file
import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
# Define embedding model and LLM
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core.settings import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
Settings.embed_model = OpenAIEmbedding()
# Load the index with some example data
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader(
input_files=["./data/paul_graham_essay.txt"]
).load_data()
# Chunk documents into nodes
from llama_index.core.node_parser import SentenceWindowNodeParser
# create the sentence window node parser w/ default settings
node_parser = SentenceWindowNodeParser.from_defaults(
window_size=3,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
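# Each node keeps its own sentence under the "original_text" metadata key and the
# surrounding sentences (window_size sentences on each side) under "window".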
# Extract nodes from documents
nodes = node_parser.get_nodes_from_documents(documents)
# Build the index
import weaviate
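# Embedded Weaviate launches a local instance on the fly, so no separately
# managed database server is needed for this demo.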
client = weaviate.Client(
embedded_options=weaviate.embedded.EmbeddedOptions(),
)
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.weaviate import WeaviateVectorStore
index_name = "MyExternalContent"
# Construct vector store
vector_store = WeaviateVectorStore(
weaviate_client=client,
index_name=index_name,
)
# Set up the storage for the embeddings
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# If an index with the same index name already exists within Weaviate, delete it
if client.schema.exists(index_name):
client.schema.delete_class(index_name)
# Setup the index
# build VectorStoreIndex that takes care of chunking documents
# and encoding chunks to embeddings for future retrieval
index = VectorStoreIndex(
nodes,
storage_context=storage_context,
)
# Setup the query engine
from llama_index.core.postprocessor import MetadataReplacementPostProcessor
# The target key defaults to `window` to match the node_parser's default
postproc = MetadataReplacementPostProcessor(
target_metadata_key="window"
)
from llama_index.core.postprocessor import SentenceTransformerRerank
# Define reranker model
rerank = SentenceTransformerRerank(
    top_n=2,
    model="BAAI/bge-reranker-base"
)
query_engine = index.as_query_engine(
    similarity_top_k=6,
    vector_store_query_mode="hybrid",
    alpha=0.5,
    node_postprocessors=[postproc, rerank],
)
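# alpha=0.5 weighs the keyword (BM25) and vector scores equally in Weaviate's
# hybrid search; the reranker then re-orders the 6 retrieved chunks and keeps the top 2.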
# Run a query against the advanced RAG pipeline (sentence-window retrieval + hybrid search + reranking)
response = query_engine.query(
"What happened at InterLeaf?",
)
print(response)
window = response.source_nodes[0].node.metadata["window"]
sentence = response.source_nodes[0].node.metadata["original_text"]
print(f"Window: {window}")
print("------------------")
print(f"Original Sentence: {sentence}") | [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.postprocessor.SentenceTransformerRerank",
"llama_index.embeddings.openai.OpenAIEmbedding",
"llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.core.VectorStoreIndex",
"llama_index.core.postprocessor.MetadataReplacementPostProcessor",
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.openai.OpenAI",
"llama_index.vector_stores.weaviate.WeaviateVectorStore"
] | [((500, 546), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.1)'}), "(model='gpt-3.5-turbo', temperature=0.1)\n", (506, 546), False, 'from llama_index.llms.openai import OpenAI\n'), ((570, 587), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (585, 587), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((953, 1085), 'llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(3)', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=3, window_metadata_key=\n 'window', original_text_metadata_key='original_text')\n", (991, 1085), False, 'from llama_index.core.node_parser import SentenceWindowNodeParser\n'), ((1512, 1578), 'llama_index.vector_stores.weaviate.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'client', 'index_name': 'index_name'}), '(weaviate_client=client, index_name=index_name)\n', (1531, 1578), False, 'from llama_index.vector_stores.weaviate import WeaviateVectorStore\n'), ((1649, 1704), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1677, 1704), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((2014, 2070), 'llama_index.core.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'storage_context': 'storage_context'}), '(nodes, storage_context=storage_context)\n', (2030, 2070), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((2270, 2332), 'llama_index.core.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (2302, 2332), False, 'from llama_index.core.postprocessor import MetadataReplacementPostProcessor\n'), ((2443, 2509), 'llama_index.core.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': '(2)', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=2, model='BAAI/bge-reranker-base')\n", (2468, 2509), False, 'from llama_index.core.postprocessor import SentenceTransformerRerank\n'), ((286, 299), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (297, 299), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((694, 761), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['./data/paul_graham_essay.txt']"}), "(input_files=['./data/paul_graham_essay.txt'])\n", (715, 761), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((1268, 1303), 'weaviate.embedded.EmbeddedOptions', 'weaviate.embedded.EmbeddedOptions', ([], {}), '()\n', (1301, 1303), False, 'import weaviate\n'), ((104, 126), 'importlib.metadata.version', 'version', (['"""llama_index"""'], {}), "('llama_index')\n", (111, 126), False, 'from importlib.metadata import version\n'), ((157, 183), 'importlib.metadata.version', 'version', (['"""weaviate-client"""'], {}), "('weaviate-client')\n", (164, 183), False, 'from importlib.metadata import version\n')] |
import llama_index
from llama_index import GPTVectorStoreIndex, Document, SimpleDirectoryReader
import os
import openai
os.environ['OPENAI_API_KEY'] = 'sk-YOUR-API-KEY'
# Loading from a directory
documents = SimpleDirectoryReader('data').load_data()
documents = [Document(text="What is the meaning of life?"), Document(text="How do I make a cup of coffee?")]
# from_documents parses the documents into nodes and builds the index over them
index = GPTVectorStoreIndex.from_documents(documents)
# Construct a simple vector index
#index = GPTVectorStoreIndex(documents)
# Save your index to a index.json file
index.save_to_disk('index.json')
# Load the index from your saved index.json file
index = GPTVectorStoreIndex.load_from_disk('index.json')
# Querying the index
response = index.query("What features do users want to see in the app?")
print(response)
| [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.GPTVectorStoreIndex.load_from_disk",
"llama_index.LlamaIndex",
"llama_index.Document"
] | [((273, 285), 'llama_index.LlamaIndex', 'LlamaIndex', ([], {}), '()\n', (283, 285), False, 'from llama_index import GPTVectorStoreIndex, Document, SimpleDirectoryReader, LlamaIndex\n'), ((501, 542), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['nodes'], {}), '(nodes)\n', (535, 542), False, 'from llama_index import GPTVectorStoreIndex, Document, SimpleDirectoryReader, LlamaIndex\n'), ((749, 797), 'llama_index.GPTVectorStoreIndex.load_from_disk', 'GPTVectorStoreIndex.load_from_disk', (['"""index.json"""'], {}), "('index.json')\n", (783, 797), False, 'from llama_index import GPTVectorStoreIndex, Document, SimpleDirectoryReader, LlamaIndex\n'), ((299, 344), 'llama_index.Document', 'Document', ([], {'text': '"""What is the meaning of life?"""'}), "(text='What is the meaning of life?')\n", (307, 344), False, 'from llama_index import GPTVectorStoreIndex, Document, SimpleDirectoryReader, LlamaIndex\n'), ((346, 393), 'llama_index.Document', 'Document', ([], {'text': '"""How do I make a cup of coffee?"""'}), "(text='How do I make a cup of coffee?')\n", (354, 393), False, 'from llama_index import GPTVectorStoreIndex, Document, SimpleDirectoryReader, LlamaIndex\n'), ((222, 251), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (243, 251), False, 'from llama_index import GPTVectorStoreIndex, Document, SimpleDirectoryReader, LlamaIndex\n')] |
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.schema import BaseNode, MetadataMode, TextNode
from llama_index.vector_stores.types import (
MetadataFilters,
VectorStore,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.filters) == 1:
filter = standard_filters.filters[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.filters:
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
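    # Shift by the maximum score and exponentiate: the best hit maps to 1.0 and
    # all returned similarities fall in (0, 1].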
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
class ElasticsearchStore(VectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
self.index_name = index_name
self.text_field = text_field
self.vector_field = vector_field
self.batch_size = batch_size
self.distance_strategy = distance_strategy
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index
return f"llama_index-py-vs/{llama_index.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if await self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
await async_bulk(
self.client, requests, chunk_size=self.batch_size, refresh=True
)
try:
success, failed = await async_bulk(
self.client, requests, stats_only=True, refresh=True
)
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception as e:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the vector store query, including the query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the vector store query, including the query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.filters) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships")
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
if query.mode == VectorStoreQueryMode.HYBRID:
total_rank = sum(top_k_scores)
top_k_scores = [total_rank - rank / total_rank for rank in top_k_scores]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
| [
"llama_index.schema.TextNode",
"llama_index.vector_stores.utils.metadata_dict_to_node",
"llama_index.vector_stores.utils.node_to_metadata_dict"
] | [((534, 553), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (543, 553), False, 'from logging import getLogger\n'), ((2379, 2432), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2411, 2432), False, 'import elasticsearch\n'), ((3666, 3682), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3674, 3682), True, 'import numpy as np\n'), ((5347, 5367), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5365, 5367), False, 'import nest_asyncio\n'), ((16696, 16736), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (16700, 16736), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12177, 12252), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12187, 12252), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((9669, 9693), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (9691, 9693), False, 'import asyncio\n'), ((11569, 11614), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (11590, 11614), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((12324, 12388), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (12334, 12388), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13262, 13286), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (13284, 13286), False, 'import asyncio\n'), ((15474, 15498), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (15496, 15498), False, 'import asyncio\n'), ((18587, 18618), 'llama_index.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (18608, 18618), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3718, 3740), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3724, 3740), True, 'import numpy as np\n'), ((11803, 11815), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11813, 11815), False, 'import uuid\n'), ((19261, 19404), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (19269, 19404), False, 'from llama_index.schema import BaseNode, MetadataMode, TextNode\n')] |
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.schema import BaseNode, MetadataMode, TextNode
from llama_index.vector_stores.types import (
MetadataFilters,
VectorStore,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
class ElasticsearchStore(VectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
self.index_name = index_name
self.text_field = text_field
self.vector_field = vector_field
self.batch_size = batch_size
self.distance_strategy = distance_strategy
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index
return f"llama_index-py-vs/{llama_index.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if await self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
await async_bulk(
self.client, requests, chunk_size=self.batch_size, refresh=True
)
try:
success, failed = await async_bulk(
self.client, requests, stats_only=True, refresh=True
)
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the vector store query, including the query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the vector store query, including the query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
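            # Reciprocal Rank Fusion (rrf) merges the kNN and BM25 result lists
            # when both are present in hybrid mode.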
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships")
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
if query.mode == VectorStoreQueryMode.HYBRID:
total_rank = sum(top_k_scores)
top_k_scores = [total_rank - rank / total_rank for rank in top_k_scores]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
| [
"llama_index.schema.TextNode",
"llama_index.vector_stores.utils.metadata_dict_to_node",
"llama_index.vector_stores.utils.node_to_metadata_dict"
] | [((534, 553), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (543, 553), False, 'from logging import getLogger\n'), ((2379, 2432), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2411, 2432), False, 'import elasticsearch\n'), ((3755, 3771), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3763, 3771), True, 'import numpy as np\n'), ((5436, 5456), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5454, 5456), False, 'import nest_asyncio\n'), ((16834, 16874), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (16838, 16874), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12320, 12395), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12330, 12395), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((9785, 9809), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (9807, 9809), False, 'import asyncio\n'), ((11712, 11757), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (11733, 11757), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((12467, 12531), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (12477, 12531), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13405, 13429), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (13427, 13429), False, 'import asyncio\n'), ((15612, 15636), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (15634, 15636), False, 'import asyncio\n'), ((18734, 18765), 'llama_index.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (18755, 18765), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3807, 3829), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3813, 3829), True, 'import numpy as np\n'), ((11946, 11958), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11956, 11958), False, 'import uuid\n'), ((19408, 19551), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (19416, 19551), False, 'from llama_index.schema import BaseNode, MetadataMode, TextNode\n')] |
from llama_index_manager import LLAMA_Index_Manager
from llama_index import SimpleDirectoryReader
manager = LLAMA_Index_Manager('vigilant-yeti-400300', 'oi-hackathon', 'blah/blah/eriks_vector_index')
# Retrieve vector store (if you put a path that doesn't exist, it will return a new empty index)
index = manager.retrieve_index_from_gcs()
# Add docs from local directory
documents = SimpleDirectoryReader('./test_library', recursive=True).load_data()
for doc in documents:
index.insert(doc)
# Save index persistently back to gcs
manager.save_index_to_gcs_from_local(index, 'oi-hackathon', 'blah/blah/eriks_vector_index')
print(manager.retrieve_context([{ 'message': 'What is the use of Gaussian Mixture Models here?' }]))
# Now you can retrieve the index from the gcs path again whenever you want and continue adding docs to it and retrieving context from it. | [
"llama_index_manager.LLAMA_Index_Manager",
"llama_index.SimpleDirectoryReader"
] | [((109, 204), 'llama_index_manager.LLAMA_Index_Manager', 'LLAMA_Index_Manager', (['"""vigilant-yeti-400300"""', '"""oi-hackathon"""', '"""blah/blah/eriks_vector_index"""'], {}), "('vigilant-yeti-400300', 'oi-hackathon',\n 'blah/blah/eriks_vector_index')\n", (128, 204), False, 'from llama_index_manager import LLAMA_Index_Manager\n'), ((387, 442), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./test_library"""'], {'recursive': '(True)'}), "('./test_library', recursive=True)\n", (408, 442), False, 'from llama_index import SimpleDirectoryReader\n')] |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import os
from getpass import getpass
if os.getenv("OPENAI_API_KEY") is None:
os.environ["OPENAI_API_KEY"] = getpass(
"Paste your OpenAI key from:"
" https://platform.openai.com/account/api-keys\n"
)
assert os.getenv("OPENAI_API_KEY", "").startswith(
"sk-"
), "This doesn't look like a valid OpenAI API key"
print("OpenAI API key configured")
import os
from getpass import getpass
if os.getenv("HONEYHIVE_API_KEY") is None:
os.environ["HONEYHIVE_API_KEY"] = getpass(
"Paste your HoneyHive key from:"
" https://app.honeyhive.ai/settings/account\n"
)
print("HoneyHive API key configured")
get_ipython().system('pip install llama-index')
from llama_index.core.callbacks import CallbackManager
from llama_index.core.callbacks import LlamaDebugHandler
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
SimpleKeywordTableIndex,
StorageContext,
)
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from honeyhive.utils.llamaindex_tracer import HoneyHiveLlamaIndexTracer
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-4", temperature=0)
import llama_index.core
from llama_index.core import set_global_handler
set_global_handler(
"honeyhive",
project="My LlamaIndex Project",
name="My LlamaIndex Pipeline",
api_key=os.environ["HONEYHIVE_API_KEY"],
)
hh_tracer = llama_index.core.global_handler
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
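# Alternatively, construct the HoneyHive tracer directly instead of reading it
# back from the global handler configured above.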
hh_tracer = HoneyHiveLlamaIndexTracer(
project="My LlamaIndex Project",
name="My LlamaIndex Pipeline",
api_key=os.environ["HONEYHIVE_API_KEY"],
)
callback_manager = CallbackManager([llama_debug, hh_tracer])
Settings.callback_manager = callback_manager
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
docs = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(docs)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response, sep="\n") | [
"llama_index.core.callbacks.LlamaDebugHandler",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.set_global_handler",
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.openai.OpenAI"
] | [((1229, 1265), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""', 'temperature': '(0)'}), "(model='gpt-4', temperature=0)\n", (1235, 1265), False, 'from llama_index.llms.openai import OpenAI\n'), ((1343, 1484), 'llama_index.core.set_global_handler', 'set_global_handler', (['"""honeyhive"""'], {'project': '"""My LlamaIndex Project"""', 'name': '"""My LlamaIndex Pipeline"""', 'api_key': "os.environ['HONEYHIVE_API_KEY']"}), "('honeyhive', project='My LlamaIndex Project', name=\n 'My LlamaIndex Pipeline', api_key=os.environ['HONEYHIVE_API_KEY'])\n", (1361, 1484), False, 'from llama_index.core import set_global_handler\n'), ((1560, 1602), 'llama_index.core.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (1577, 1602), False, 'from llama_index.core.callbacks import LlamaDebugHandler\n'), ((1616, 1751), 'honeyhive.utils.llamaindex_tracer.HoneyHiveLlamaIndexTracer', 'HoneyHiveLlamaIndexTracer', ([], {'project': '"""My LlamaIndex Project"""', 'name': '"""My LlamaIndex Pipeline"""', 'api_key': "os.environ['HONEYHIVE_API_KEY']"}), "(project='My LlamaIndex Project', name=\n 'My LlamaIndex Pipeline', api_key=os.environ['HONEYHIVE_API_KEY'])\n", (1641, 1751), False, 'from honeyhive.utils.llamaindex_tracer import HoneyHiveLlamaIndexTracer\n'), ((1782, 1823), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[llama_debug, hh_tracer]'], {}), '([llama_debug, hh_tracer])\n', (1797, 1823), False, 'from llama_index.core.callbacks import CallbackManager\n'), ((2192, 2229), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {}), '(docs)\n', (2223, 2229), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex, StorageContext\n'), ((115, 142), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (124, 142), False, 'import os\n'), ((187, 282), 'getpass.getpass', 'getpass', (['"""Paste your OpenAI key from: https://platform.openai.com/account/api-keys\n"""'], {}), "(\n 'Paste your OpenAI key from: https://platform.openai.com/account/api-keys\\n'\n )\n", (194, 282), False, 'from getpass import getpass\n'), ((489, 519), 'os.getenv', 'os.getenv', (['"""HONEYHIVE_API_KEY"""'], {}), "('HONEYHIVE_API_KEY')\n", (498, 519), False, 'import os\n'), ((567, 662), 'getpass.getpass', 'getpass', (['"""Paste your HoneyHive key from: https://app.honeyhive.ai/settings/account\n"""'], {}), "(\n 'Paste your HoneyHive key from: https://app.honeyhive.ai/settings/account\\n'\n )\n", (574, 662), False, 'from getpass import getpass\n'), ((305, 336), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""', '""""""'], {}), "('OPENAI_API_KEY', '')\n", (314, 336), False, 'import os\n'), ((2125, 2169), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data/paul_graham/"""'], {}), "('./data/paul_graham/')\n", (2146, 2169), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex, StorageContext\n')] |
from llama_index.core import KnowledgeGraphIndex
from llama_index.core import StorageContext, load_index_from_storage
import llama_index.core
llama_index.core.set_global_handler("langfuse")
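# (the "langfuse" handler needs the Langfuse callback integration installed and
#  its credentials configured, typically via LANGFUSE_* environment variables)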
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir="math_index_persist")
# load index
query_engine = load_index_from_storage(storage_context).as_query_engine()
# query
query = "What are two prominent early number theorists?"
result = query_engine.query(query)
print(result) | [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage"
] | [((236, 298), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""math_index_persist"""'}), "(persist_dir='math_index_persist')\n", (264, 298), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((328, 368), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (351, 368), False, 'from llama_index.core import StorageContext, load_index_from_storage\n')] |
"""Global eval handlers."""
from typing import Any
from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler
from llama_index.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index
llama_index.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
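# Illustrative usage (assumes the corresponding callback extras are installed):
#   import llama_index
#   llama_index.set_global_handler("simple")
#   llama_index.set_global_handler("wandb", run_args={"project": "llamaindex"})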
| [
"llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.callbacks.promptlayer_handler.PromptLayerHandler"
] | [((990, 1025), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1010, 1025), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1083, 1126), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1111, 1126), False, 'from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1184, 1229), 'llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1214, 1229), False, 'from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1283, 1324), 'llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1309, 1324), False, 'from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1380, 1413), 'llama_index.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1398, 1413), False, 'from llama_index.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1464, 1495), 'llama_index.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1480, 1495), False, 'from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler\n')] |
"""
File name: prepare_chain_4_chat.py
Author: Luigi Saetta
Date created: 2023-01-04
Date last modified: 2023-03-03
Python Version: 3.9
Description:
This module provides a function to initialize the RAG chain
for chat using the message history
Usage:
Import this module into other scripts to use its functions.
Example:
from prepare_chain_4_chat import create_chat_engine
License:
This code is released under the MIT License.
Notes:
    This is part of a set of demos showing how to use Oracle Vector DB,
    OCI GenAI service, Oracle GenAI Embeddings, to build a RAG solution,
    where all the data (text + embeddings) are stored in Oracle DB 23c
    For the LLM it can now use: OCI, Mistral 8x7B
Warnings:
This module is in development, may change in future versions.
"""
import os
import logging
import llama_index
from llama_index import VectorStoreIndex, ServiceContext
from llama_index.callbacks import CallbackManager
from tokenizers import Tokenizer
from llama_index.callbacks import TokenCountingHandler
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.llms import MistralAI
from llama_index.memory import ChatMemoryBuffer
import ads
from ads.llm import GenerativeAIEmbeddings, GenerativeAI
# COHERE_API_KEY is used for the reranker
# MISTRAL_API_KEY is used for the LLM
from config_private import COMPARTMENT_OCID, ENDPOINT, MISTRAL_API_KEY, COHERE_API_KEY
from config import (
EMBED_MODEL_TYPE,
EMBED_MODEL,
TOKENIZER,
GEN_MODEL,
MAX_TOKENS,
TEMPERATURE,
TOP_K,
TOP_N,
ADD_RERANKER,
RERANKER_MODEL,
RERANKER_ID,
CHAT_MODE,
MEMORY_TOKEN_LIMIT,
ADD_PHX_TRACING,
PHX_PORT,
PHX_HOST,
)
from oci_utils import load_oci_config, print_configuration
from oracle_vector_db import OracleVectorStore
from oci_baai_reranker import OCIBAAIReranker
from oci_llama_reranker import OCILLamaReranker
# added phx tracing
if ADD_PHX_TRACING:
import phoenix as px
# Configure logging
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
#
# This module now exposes the factory methods for all the individual components (llm, reranker, etc.).
# The philosophy of the factory methods is that they take all their settings from the config
# module, so they need as few parameters as possible.
#
def display_prompt_dict(prompts_dict):
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>"
logging.info(text_md)
logging.info(p.get_template())
#
# enables to plug different GEN_MODELS
# for now: OCI, LLAMA2 70 B, MISTRAL
#
def create_llm(auth=None):
# this check is to avoid mistakes in config.py
model_list = ["OCI", "LLAMA", "MISTRAL"]
if GEN_MODEL not in model_list:
raise ValueError(
f"The value {GEN_MODEL} is not supported. Choose a value in {model_list} for the GenAI model."
)
llm = None
# to reduce code (29/02)
# common params for all OCI genAI models
common_oci_params = {
"auth": auth,
"compartment_id": COMPARTMENT_OCID,
"max_tokens": MAX_TOKENS,
"temperature": TEMPERATURE,
"truncate": "END",
"client_kwargs": {"service_endpoint": ENDPOINT},
}
if GEN_MODEL == "OCI":
llm = GenerativeAI(
name="cohere.command",
**common_oci_params,
)
if GEN_MODEL == "LLAMA":
llm = GenerativeAI(name="meta.llama-2-70b-chat", **common_oci_params)
if GEN_MODEL == "MISTRAL":
llm = MistralAI(
api_key=MISTRAL_API_KEY,
model="mistral-medium",
temperature=TEMPERATURE,
max_tokens=MAX_TOKENS,
)
return llm
def create_reranker(auth=None, verbose=False):
model_list = ["COHERE", "OCI_BAAI"]
if RERANKER_MODEL not in model_list:
raise ValueError(
f"The value {RERANKER_MODEL} is not supported. Choose a value in {model_list} for the Reranker model."
)
reranker = None
if RERANKER_MODEL == "COHERE":
reranker = CohereRerank(api_key=COHERE_API_KEY, top_n=TOP_N)
# reranker model deployed as MD in OCI DS
if RERANKER_MODEL == "OCI_BAAI":
baai_reranker = OCIBAAIReranker(
auth=auth, deployment_id=RERANKER_ID, region="eu-frankfurt-1"
)
reranker = OCILLamaReranker(
oci_reranker=baai_reranker, top_n=TOP_N, verbose=verbose
)
return reranker
def create_embedding_model(auth=None):
model_list = ["OCI"]
if EMBED_MODEL_TYPE not in model_list:
raise ValueError(
f"The value {EMBED_MODEL_TYPE} is not supported. Choose a value in {model_list} for the Embeddings model."
)
embed_model = None
if EMBED_MODEL_TYPE == "OCI":
embed_model = GenerativeAIEmbeddings(
auth=auth,
compartment_id=COMPARTMENT_OCID,
model=EMBED_MODEL,
# added since in this chat the input text for the query
# can be rather long (it is a condensed query, that takes also the history)
truncate="END",
# Optionally you can specify keyword arguments for the OCI client
# e.g. service_endpoint.
client_kwargs={"service_endpoint": ENDPOINT},
)
return embed_model
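# --- Illustrative standalone use of the factory methods above ---
# (a minimal sketch: create_chat_engine() below wires these together, this
# only shows that each factory can also be used on its own)
#
#   oci_config = load_oci_config()
#   api_keys_config = ads.auth.api_keys(oci_config)
#   llm = create_llm(auth=api_keys_config)
#   embed_model = create_embedding_model(auth=api_keys_config)
#   reranker = create_reranker(auth=api_keys_config, verbose=True)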
#
# the entire chain is built here
#
def create_chat_engine(token_counter=None, verbose=False):
logging.info("calling create_chat_engine()...")
# for now the only supported here...
print_configuration()
if ADD_PHX_TRACING:
os.environ["PHOENIX_PORT"] = PHX_PORT
os.environ["PHOENIX_HOST"] = PHX_HOST
px.launch_app()
llama_index.set_global_handler("arize_phoenix")
# load security info needed for OCI
oci_config = load_oci_config()
api_keys_config = ads.auth.api_keys(oci_config)
# this is to embed the question
embed_model = create_embedding_model(auth=api_keys_config)
# this is the custom class to access Oracle DB as Vectore Store
v_store = OracleVectorStore(verbose=False)
# this is to access OCI or MISTRAL GenAI service
llm = create_llm(auth=api_keys_config)
# this part has been added to count the total # of tokens
cohere_tokenizer = Tokenizer.from_pretrained(TOKENIZER)
token_counter = TokenCountingHandler(tokenizer=cohere_tokenizer.encode)
callback_manager = CallbackManager([token_counter])
# integrate OCI/Mistral in llama-index
service_context = ServiceContext.from_defaults(
llm=llm, embed_model=embed_model, callback_manager=callback_manager
)
index = VectorStoreIndex.from_vector_store(
vector_store=v_store, service_context=service_context
)
memory = ChatMemoryBuffer.from_defaults(
token_limit=MEMORY_TOKEN_LIMIT, tokenizer_fn=cohere_tokenizer.encode
)
# the whole chain (query string -> embed query -> retrieval ->
# reranker -> context, query-> GenAI -> response)
# is wrapped in the chat engine
# here we could plug a reranker improving the quality
node_postprocessors = None
    if ADD_RERANKER:
        reranker = create_reranker(auth=api_keys_config)
        node_postprocessors = [reranker]
chat_engine = index.as_chat_engine(
chat_mode=CHAT_MODE,
memory=memory,
verbose=False,
similarity_top_k=TOP_K,
node_postprocessors=node_postprocessors,
)
# to add a blank line in the log
logging.info("")
return chat_engine, token_counter
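#
# Quick smoke test (illustrative sketch, assuming the config and OCI
# credentials described above are in place; the question is just an example).
#
if __name__ == "__main__":
    chat_engine, token_counter = create_chat_engine(verbose=True)
    answer = chat_engine.chat("What is Oracle Vector DB?")
    logging.info(answer)
    logging.info("Total LLM tokens used: %s", token_counter.total_llm_token_count)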
| [
"llama_index.postprocessor.cohere_rerank.CohereRerank",
"llama_index.llms.MistralAI",
"llama_index.ServiceContext.from_defaults",
"llama_index.callbacks.TokenCountingHandler",
"llama_index.set_global_handler",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.callbacks.CallbackManager",
"llama_index.memory.ChatMemoryBuffer.from_defaults"
] | [((1998, 2094), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n", (2017, 2094), False, 'import logging\n'), ((5459, 5506), 'logging.info', 'logging.info', (['"""calling create_chat_engine()..."""'], {}), "('calling create_chat_engine()...')\n", (5471, 5506), False, 'import logging\n'), ((5553, 5574), 'oci_utils.print_configuration', 'print_configuration', ([], {}), '()\n', (5572, 5574), False, 'from oci_utils import load_oci_config, print_configuration\n'), ((5830, 5847), 'oci_utils.load_oci_config', 'load_oci_config', ([], {}), '()\n', (5845, 5847), False, 'from oci_utils import load_oci_config, print_configuration\n'), ((5871, 5900), 'ads.auth.api_keys', 'ads.auth.api_keys', (['oci_config'], {}), '(oci_config)\n', (5888, 5900), False, 'import ads\n'), ((6084, 6116), 'oracle_vector_db.OracleVectorStore', 'OracleVectorStore', ([], {'verbose': '(False)'}), '(verbose=False)\n', (6101, 6116), False, 'from oracle_vector_db import OracleVectorStore\n'), ((6300, 6336), 'tokenizers.Tokenizer.from_pretrained', 'Tokenizer.from_pretrained', (['TOKENIZER'], {}), '(TOKENIZER)\n', (6325, 6336), False, 'from tokenizers import Tokenizer\n'), ((6357, 6412), 'llama_index.callbacks.TokenCountingHandler', 'TokenCountingHandler', ([], {'tokenizer': 'cohere_tokenizer.encode'}), '(tokenizer=cohere_tokenizer.encode)\n', (6377, 6412), False, 'from llama_index.callbacks import TokenCountingHandler\n'), ((6437, 6469), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (6452, 6469), False, 'from llama_index.callbacks import CallbackManager\n'), ((6536, 6637), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'callback_manager': 'callback_manager'}), '(llm=llm, embed_model=embed_model,\n callback_manager=callback_manager)\n', (6564, 6637), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((6661, 6755), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'v_store', 'service_context': 'service_context'}), '(vector_store=v_store, service_context=\n service_context)\n', (6695, 6755), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((6779, 6884), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': 'MEMORY_TOKEN_LIMIT', 'tokenizer_fn': 'cohere_tokenizer.encode'}), '(token_limit=MEMORY_TOKEN_LIMIT, tokenizer_fn\n =cohere_tokenizer.encode)\n', (6809, 6884), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((7539, 7555), 'logging.info', 'logging.info', (['""""""'], {}), "('')\n", (7551, 7555), False, 'import logging\n'), ((2485, 2506), 'logging.info', 'logging.info', (['text_md'], {}), '(text_md)\n', (2497, 2506), False, 'import logging\n'), ((3316, 3372), 'ads.llm.GenerativeAI', 'GenerativeAI', ([], {'name': '"""cohere.command"""'}), "(name='cohere.command', **common_oci_params)\n", (3328, 3372), False, 'from ads.llm import GenerativeAIEmbeddings, GenerativeAI\n'), ((3451, 3514), 'ads.llm.GenerativeAI', 'GenerativeAI', ([], {'name': '"""meta.llama-2-70b-chat"""'}), "(name='meta.llama-2-70b-chat', **common_oci_params)\n", (3463, 3514), False, 'from ads.llm import GenerativeAIEmbeddings, GenerativeAI\n'), ((3560, 3671), 'llama_index.llms.MistralAI', 
'MistralAI', ([], {'api_key': 'MISTRAL_API_KEY', 'model': '"""mistral-medium"""', 'temperature': 'TEMPERATURE', 'max_tokens': 'MAX_TOKENS'}), "(api_key=MISTRAL_API_KEY, model='mistral-medium', temperature=\n TEMPERATURE, max_tokens=MAX_TOKENS)\n", (3569, 3671), False, 'from llama_index.llms import MistralAI\n'), ((4100, 4149), 'llama_index.postprocessor.cohere_rerank.CohereRerank', 'CohereRerank', ([], {'api_key': 'COHERE_API_KEY', 'top_n': 'TOP_N'}), '(api_key=COHERE_API_KEY, top_n=TOP_N)\n', (4112, 4149), False, 'from llama_index.postprocessor.cohere_rerank import CohereRerank\n'), ((4258, 4336), 'oci_baai_reranker.OCIBAAIReranker', 'OCIBAAIReranker', ([], {'auth': 'auth', 'deployment_id': 'RERANKER_ID', 'region': '"""eu-frankfurt-1"""'}), "(auth=auth, deployment_id=RERANKER_ID, region='eu-frankfurt-1')\n", (4273, 4336), False, 'from oci_baai_reranker import OCIBAAIReranker\n'), ((4379, 4453), 'oci_llama_reranker.OCILLamaReranker', 'OCILLamaReranker', ([], {'oci_reranker': 'baai_reranker', 'top_n': 'TOP_N', 'verbose': 'verbose'}), '(oci_reranker=baai_reranker, top_n=TOP_N, verbose=verbose)\n', (4395, 4453), False, 'from oci_llama_reranker import OCILLamaReranker\n'), ((4843, 4995), 'ads.llm.GenerativeAIEmbeddings', 'GenerativeAIEmbeddings', ([], {'auth': 'auth', 'compartment_id': 'COMPARTMENT_OCID', 'model': 'EMBED_MODEL', 'truncate': '"""END"""', 'client_kwargs': "{'service_endpoint': ENDPOINT}"}), "(auth=auth, compartment_id=COMPARTMENT_OCID, model=\n EMBED_MODEL, truncate='END', client_kwargs={'service_endpoint': ENDPOINT})\n", (4865, 4995), False, 'from ads.llm import GenerativeAIEmbeddings, GenerativeAI\n'), ((5700, 5715), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (5713, 5715), True, 'import phoenix as px\n'), ((5724, 5771), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""arize_phoenix"""'], {}), "('arize_phoenix')\n", (5754, 5771), False, 'import llama_index\n')] |
from io import BytesIO
from flask import Flask, jsonify
import os
# import tweepy
from dotenv import load_dotenv
from flask import request,jsonify
import snscrape.modules.twitter as snstwitter
import requests
from goose3 import Goose
from wordcloud import WordCloud, STOPWORDS
import plotly.graph_objs as go
import json
import plotly
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import base64
import pandas as pd
# from flask import send_file
from flask import send_file
import datetime
import plotly.express as px
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import logging
import sys
from llama_index import GPTVectorStoreIndex, TwitterTweetReader
import os
import llama_index
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader
import requests
from bs4 import BeautifulSoup
from urllib.parse import quote
import serpapi
from serpapi import GoogleSearch
import os
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SequentialChain
from langchain.memory import ConversationBufferMemory
# from langchain.utilities import WikipediaAPIWrapper
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from dotenv import load_dotenv
from flask_cors import CORS
from llama_index import SimpleDirectoryReader
from llama_index import GPTVectorStoreIndex
import llama_index
import io
import base64
import matplotlib.pyplot as plt
import seaborn as sns
app = Flask(__name__)
CORS(app)
twitterData = None
queryString = None
# print(type(twitterData))
load_dotenv()
print(os.getenv("HUGGINGFACE_API"))
print(os.getenv('OPENAI_API_KEY'))
print(os.getenv('BEARER_TOKEN'))
os.getenv('OPENAI_API_KEY')
@app.route('/')
def hello_geek():
return '<h1>Hello from Flask & Docker</h2>'
@app.route('/twitter')
def twitter():
query = request.args['query']
retweet = 0
likecount = 0
hashtags = set([])
i=0
global twitterData
global queryString
print("Url: Twitter, data: ", twitterData)
print("Url: Twitter, query: ", queryString)
twitterData = snstwitter.TwitterSearchScraper(query).get_items()
for tweet in twitterData:
print("looping through tweets")
print(vars(tweet))
likecount += tweet.likeCount
retweet += tweet.retweetCount + tweet.quoteCount
if(tweet.hashtags != None):
for h in tweet.hashtags:
hashtags.add(h)
i+= 1
if(i==200):
break
tweets = {"likecount":likecount,"retweet":retweet,"hashtags":list(hashtags),"count":i}
print(tweets)
return jsonify({'result':tweets})
os.getenv('SERPAPI_API_KEY')
#For getting the realted link - by providing the URL
@app.route('/search', methods=['GET'])
def search():
article_url = request.args.get('url')
response = requests.get(article_url)
    soup = BeautifulSoup(response.text, 'html.parser')
    header = soup.find('h1').text.strip()
search_query = quote(header)
params = {
'q': search_query,
'hl': 'en',
'gl': 'us',
'api_key': os.getenv('SERPAPI_API_KEY')
}
search = GoogleSearch(params)
results = search.get_dict().get('organic_results', [])
links = [result['link'] for result in results]
return jsonify({'article_header': header, 'related_links': links})
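# Example request (illustrative, hypothetical article URL):
#   curl "http://localhost:5000/search?url=https://example.com/some-article"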
# To use LLM to check the factual accuracy of the news
@app.route('/classify_news', methods=['GET'])
def classify_news():
prompt = request.args['url']
tool_names = ["serpapi"]
tools = load_tools(tool_names)
title_template = PromptTemplate(
input_variables = ['topic'],
template='To classify the news: {topic} in to the categories like murder, fire, accident, natural disaster, etc'
)
# script_template = PromptTemplate(
# input_variables = ['title', 'wikipedia_research'],
# #template='Look for the authenticity and the accuracy of the news listed: {title} provide the explanation of whether it is factually correct or is there any information present on wikipedia and also provide the correct answer or result if there is:{wikipedia_research} '
# template='Please verify the authenticity and accuracy of the news provided in the {title} by cross-referencing it with the corresponding {wikipedia_research} page. Examine the information available on Wikipedia and determine whether the news is factually correct or accurate. Additionally, if there is any conflicting or misleading information, please provide the correct answer or result based on your research from Wikipedia. '
# )
title_memory = ConversationBufferMemory(input_key='topic', memory_key='chat_history')
# script_memory = ConversationBufferMemory(input_key='title', memory_key='chat_history')
llm = OpenAI(temperature=0.9)
title_chain = LLMChain(llm=llm, prompt=title_template, verbose=True, output_key='title', memory=title_memory)
# script_chain = LLMChain(llm=llm, prompt=script_template, verbose=True, output_key='script', memory=script_memory)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
# wiki = WikipediaAPIWrapper()
if prompt:
title = title_chain.run(prompt)
a=agent.run(f"{prompt}. Also, provide the realted links")
return {
'title': title_memory.buffer,
'script': a
}
@app.route('/xyz')
def xyz():
query = request.args['query']
tweets = []
for tweet in snstwitter.TwitterProfileScraper(query).get_items():
tweets.append(tweet.date)
return tweets
API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
headers = {"Authorization": "Bearer " + os.getenv('HUGGINGFACE_API') }
API_URL_PROP = "https://api-inference.huggingface.co/models/valurank/distilroberta-propaganda-2class"
API_URL_HATE = "https://api-inference.huggingface.co/models/IMSyPP/hate_speech_en"
def query(payload):
response = requests.post(API_URL, headers=headers, json=payload)
return response.json()
def queryprop(payload):
response = requests.post(API_URL_PROP, headers=headers, json=payload)
return response.json()
def query_hate(payload):
response = requests.post(API_URL_HATE, headers=headers, json=payload)
return response.json()
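# Illustrative use of the Hugging Face Inference API helpers above
# (the input text is just a placeholder):
#   summary = query({"inputs": "Some long article text to summarise..."})
#   propaganda = queryprop({"inputs": "Some article text..."})
#   hate = query_hate({"inputs": ["Some article text..."]})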
@app.route('/sentiment')
def sentiment():
query = request.args['query']
retweet = 0
likecount = 0
hashtags = []
senti=[]
i=0
positive=0
negative=0
neutral=0
global twitterData
global queryString
print("Url: Sentiment, data: ", twitterData)
twitterData = snstwitter.TwitterSearchScraper(query).get_items()
for tweet in twitterData:
if tweet.lang=="en":
i+=1
if(i==200):
break
sentence= tweet.rawContent
print(sentence)
sid_obj = SentimentIntensityAnalyzer()
            sentiment_dict = sid_obj.polarity_scores(sentence)
print(sentiment_dict['neg']*100, "% Negative")
print(sentiment_dict['pos']*100, "% Positive")
print("Review Overall Analysis", end = " ")
if sentiment_dict['compound'] >= 0.05 :
positive+=1
elif sentiment_dict['compound'] <= -0.05 :
negative+=1
else :
neutral+=1
senti={"positive":positive, "negative":negative, "neutral":neutral}
labels = list(senti.keys())
values = list(senti.values())
data = {"Target": ["Positive","Negative", "Neutral"], "Value": [positive, negative, neutral]}
df = pd.DataFrame(data)
target=["Positive","Negative", "Neutral"]
value=[positive, negative, neutral]
# palette_color = sns.color_palette('bright')
#plt.pie(value, labels=target, colors=palette_color, autopct='%.0f%%')
sns.barplot(x="Target" , y="Value", data=df, palette="Set2")
plt.title("Sentiment Analysis on the Tweets related to Article")
plt.savefig('senti.png')
# fig = plt.gcf()
# buf = io.BytesIO()
# fig.savefig(buf, format="png")
# buf.seek(0)
# base64_string = base64.b64encode(buf.read()).decode("utf-8")
# print(base64_string)
# return base64_string
# # return {"labels":labels, "values":values}
return send_file("./senti.png", mimetype='image/png')
@app.route('/sentiment_article')
def sentiment_article():
senti=[]
url = request.args['url']
goose = Goose()
articles = goose.extract(url)
sentence1 = articles.cleaned_text
sid_obj = SentimentIntensityAnalyzer()
    sentiment_dict = sid_obj.polarity_scores(sentence1)
print(sentiment_dict['neg']*100, "% Negative")
print(sentiment_dict['pos']*100, "% Positive")
print("Review Overall Analysis", end = " ")
if sentiment_dict['compound'] >= 0.05 :
senti.append("Positive")
elif sentiment_dict['compound'] <= -0.05 :
senti.append("Negative")
else :
senti.append("Neutral")
return jsonify({"result":senti,"pos":sentiment_dict})
@app.route('/summary')
def summary():
try:
url = request.args['url']
goose = Goose()
articles = goose.extract(url)
output = query({
"inputs": articles.cleaned_text
})
print(output)
except:
return "Please put the relevant url article"
return jsonify({"result": output[0]['summary_text']})
@app.route('/cloud2')
def plotly_wordcloud2():
url = request.args['url']
goose = Goose()
articles = goose.extract(url)
url = articles.cleaned_text
wordcloud = WordCloud(width=1280, height=853, margin=0,
colormap='Blues').generate(url)
wordcloud.to_file("./wordcloud.png")
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.margins(x=0, y=0)
# fig = plt.gcf()
# buf = io.BytesIO()
# fig.savefig(buf, format="png")
# buf.seek(0)
# base64_string = base64.b64encode(buf.read()).decode("utf-8")
# print(base64_string)
# return base64_string
return send_file("./wordcloud.png", mimetype='image/png')
@app.route('/propaganda')
def propaganda():
url = request.args['url']
goose = Goose()
articles = goose.extract(url)
output = queryprop({
"inputs": articles.cleaned_text[0:600]
})
yes = output[0][0]['score']
no = 1 - yes
data = {"Target": ["Propagandastic","Non-Propagandastic"], "Value": [yes, no]}
df = pd.DataFrame(data)
sns.barplot(x="Target" , y="Value", data=df, palette="Set2")
plt.title("Propagandastic Evaluation of the Article")
# fig = plt.gcf()
# buf = io.BytesIO()
# fig.savefig(buf, format="png")
# buf.seek(0)
# base64_string = base64.b64encode(buf.read()).decode("utf-8")
plt.savefig('propaganda.png')
# fig = plt.gcf()
# buf = io.BytesIO()
# fig.savefig(buf, format="png")
# buf.seek(0)
# base64_string = base64.b64encode(buf.read()).decode("utf-8")
# print(base64_string)
# return base64_string
return send_file("./propaganda.png", mimetype='image/png')
# return base64_string
# return jsonify({"yes": yes, "no": no})
@app.route("/chat", methods=["GET"])
def chat():
# Get the query from the request body.
query = request.args['url']
# create an app in https://developer.twitter.com/en/apps
# create reader, specify twitter handles
reader = TwitterTweetReader(os.getenv('BEARER_TOKEN'))
documents = reader.load_data(["ANI"])
documents1 = reader.load_data(["ZeeNews"])
documents2 = reader.load_data(["TV9Bharatvarsh"])
documents3 = reader.load_data(["Republic_Bharat"])
documents4 = reader.load_data(["AajTak"])
# Create a new instance of the llama chatbot agent.
agent = llama_index.GPTVectorStoreIndex.from_documents(documents1+documents+documents2+documents3+documents4)
chat_engine = agent.as_chat_engine(verbose=True)
# Get the response from the llama chatbot agent.
response = chat_engine.chat(query)
# Return the response as JSON.
return jsonify({"response": response})
@app.route('/hate-speech')
def hate():
url = request.args['url']
goose = Goose()
articles = goose.extract(url)
url = articles.cleaned_text
analyzer = SentimentIntensityAnalyzer()
# the object outputs the scores into a dict
sentiment_dict = analyzer.polarity_scores(url)
if sentiment_dict['compound'] >= 0.05 :
category = ("Positive ")
elif sentiment_dict['compound'] <= - 0.05 :
category = ("Negative ")
else :
category = ("Neutral ")
print(category)
if category == "Negative ":
res='Hate Speech'
else:
res='Not Hate Speech'
return jsonify({"sentiment":category,"verdict":res})
@app.route('/multi-class')
def category():
url = request.args['url']
# Print the output url.
print(url)
output=query_hate({
"inputs": [str(url)],
"keywords": ["LABEL_0", "LABEL_1", "LABEL_2", "LABEL_3"]})
# print(output[0])
result = {}
if url:
for data in output[0]:
if data['label'] == "LABEL_0":
result["ACCEPTABLE"] = round(data['score']*100, 2)
elif data['label'] == "LABEL_1":
result["INAPPROPRIATE"] = round(data['score']*100, 2)
elif data['label'] == "LABEL_2":
result["OFFENSIVE"] = round(data['score']*100, 2)
elif data['label'] == "LABEL_3":
result["VIOLENT"] = round(data['score']*100, 2)
labels = list(result.keys())
values = list(result.values())
data = {"Target":list(result.keys()) , "Value": list(result.values())}
df = pd.DataFrame(data)
sns.barplot(x="Target" , y="Value", data=df, palette="Set2")
plt.title("Hate Speech Params Detection present in Article")
# fig = plt.gcf()
# buf = io.BytesIO()
# fig.savefig(buf, format="png")
# buf.seek(0)
# base64_string = base64.b64encode(buf.read()).decode("utf-8")
plt.savefig('hate.png')
# fig = plt.gcf()
# buf = io.BytesIO()
# fig.savefig(buf, format="png")
# buf.seek(0)
# base64_string = base64.b64encode(buf.read()).decode("utf-8")
# print(base64_string)
# return base64_string
return send_file("./hate.png", mimetype='image/png')
# return jsonify({"result":result})
@app.route('/authenticity')
def auth():
url = request.args['url']
lis = []
df = pd.read_csv('blacklist.csv')
for i in range(len(df)):
lis.append(df.loc[i, "MBFC"])
for l in lis:
        if l in url:
return {"authentic":False}
return { "authentic": True }
@app.route('/bot-activity')
def botActivity():
url = request.args['url']
i=0
usernames = []
time = []
finalusername = []
for tweet in snstwitter.TwitterSearchScraper(url).get_items():
usernames.append(tweet.user.username)
time.append(tweet.date)
if(i==150):
break
i+=1
flag = False
for i in range(len(time)-1):
a = time[i]
b = time[i+1]
c = a-b
if(c.seconds <= 60):
finalusername.append(usernames[i+1])
print("username: ", finalusername)
if(len(finalusername) > 3):
flag = True
return jsonify({"bots":list(set(finalusername)),"flag":flag})
if __name__ == '__main__':
app.run(host="0.0.0.0",port=5000,debug=True)
| [
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((1520, 1535), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1525, 1535), False, 'from flask import Flask, jsonify\n'), ((1536, 1545), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (1540, 1545), False, 'from flask_cors import CORS\n'), ((1613, 1626), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1624, 1626), False, 'from dotenv import load_dotenv\n'), ((1734, 1761), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1743, 1761), False, 'import os\n'), ((2726, 2754), 'os.getenv', 'os.getenv', (['"""SERPAPI_API_KEY"""'], {}), "('SERPAPI_API_KEY')\n", (2735, 2754), False, 'import os\n'), ((1634, 1662), 'os.getenv', 'os.getenv', (['"""HUGGINGFACE_API"""'], {}), "('HUGGINGFACE_API')\n", (1643, 1662), False, 'import os\n'), ((1670, 1697), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1679, 1697), False, 'import os\n'), ((1705, 1730), 'os.getenv', 'os.getenv', (['"""BEARER_TOKEN"""'], {}), "('BEARER_TOKEN')\n", (1714, 1730), False, 'import os\n'), ((2699, 2726), 'flask.jsonify', 'jsonify', (["{'result': tweets}"], {}), "({'result': tweets})\n", (2706, 2726), False, 'from flask import request, jsonify\n'), ((2879, 2902), 'flask.request.args.get', 'request.args.get', (['"""url"""'], {}), "('url')\n", (2895, 2902), False, 'from flask import request, jsonify\n'), ((2918, 2943), 'requests.get', 'requests.get', (['article_url'], {}), '(article_url)\n', (2930, 2943), False, 'import requests\n'), ((2955, 2997), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.url', '"""html.parser"""'], {}), "(response.url, 'html.parser')\n", (2968, 2997), False, 'from bs4 import BeautifulSoup\n'), ((3059, 3072), 'urllib.parse.quote', 'quote', (['header'], {}), '(header)\n', (3064, 3072), False, 'from urllib.parse import quote\n'), ((3224, 3244), 'serpapi.GoogleSearch', 'GoogleSearch', (['params'], {}), '(params)\n', (3236, 3244), False, 'from serpapi import GoogleSearch\n'), ((3368, 3427), 'flask.jsonify', 'jsonify', (["{'article_header': header, 'related_links': links}"], {}), "({'article_header': header, 'related_links': links})\n", (3375, 3427), False, 'from flask import request, jsonify\n'), ((3627, 3649), 'langchain.agents.load_tools', 'load_tools', (['tool_names'], {}), '(tool_names)\n', (3637, 3649), False, 'from langchain.agents import load_tools\n'), ((3672, 3837), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['topic']", 'template': '"""To classify the news: {topic} in to the categories like murder, fire, accident, natural disaster, etc"""'}), "(input_variables=['topic'], template=\n 'To classify the news: {topic} in to the categories like murder, fire, accident, natural disaster, etc'\n )\n", (3686, 3837), False, 'from langchain.prompts import PromptTemplate\n'), ((4677, 4747), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'input_key': '"""topic"""', 'memory_key': '"""chat_history"""'}), "(input_key='topic', memory_key='chat_history')\n", (4701, 4747), False, 'from langchain.memory import ConversationBufferMemory\n'), ((4852, 4875), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (4858, 4875), False, 'from langchain.llms import OpenAI\n'), ((4895, 4994), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'title_template', 'verbose': '(True)', 'output_key': '"""title"""', 'memory': 'title_memory'}), "(llm=llm, prompt=title_template, verbose=True, output_key='title',\n 
memory=title_memory)\n", (4903, 4994), False, 'from langchain.chains import LLMChain, SequentialChain\n'), ((5123, 5202), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': '"""zero-shot-react-description"""', 'verbose': '(True)'}), "(tools, llm, agent='zero-shot-react-description', verbose=True)\n", (5139, 5202), False, 'from langchain.agents import initialize_agent\n'), ((6021, 6074), 'requests.post', 'requests.post', (['API_URL'], {'headers': 'headers', 'json': 'payload'}), '(API_URL, headers=headers, json=payload)\n', (6034, 6074), False, 'import requests\n'), ((6136, 6194), 'requests.post', 'requests.post', (['API_URL_PROP'], {'headers': 'headers', 'json': 'payload'}), '(API_URL_PROP, headers=headers, json=payload)\n', (6149, 6194), False, 'import requests\n'), ((6257, 6315), 'requests.post', 'requests.post', (['API_URL_HATE'], {'headers': 'headers', 'json': 'payload'}), '(API_URL_HATE, headers=headers, json=payload)\n', (6270, 6315), False, 'import requests\n'), ((7658, 7676), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (7670, 7676), True, 'import pandas as pd\n'), ((7894, 7953), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""Target"""', 'y': '"""Value"""', 'data': 'df', 'palette': '"""Set2"""'}), "(x='Target', y='Value', data=df, palette='Set2')\n", (7905, 7953), True, 'import seaborn as sns\n'), ((7959, 8023), 'matplotlib.pyplot.title', 'plt.title', (['"""Sentiment Analysis on the Tweets related to Article"""'], {}), "('Sentiment Analysis on the Tweets related to Article')\n", (7968, 8023), True, 'import matplotlib.pyplot as plt\n'), ((8029, 8053), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""senti.png"""'], {}), "('senti.png')\n", (8040, 8053), True, 'import matplotlib.pyplot as plt\n'), ((8567, 8613), 'flask.send_file', 'send_file', (['"""./senti.png"""'], {'mimetype': '"""image/png"""'}), "('./senti.png', mimetype='image/png')\n", (8576, 8613), False, 'from flask import send_file\n'), ((8742, 8749), 'goose3.Goose', 'Goose', ([], {}), '()\n', (8747, 8749), False, 'from goose3 import Goose\n'), ((8836, 8864), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (8862, 8864), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((9285, 9334), 'flask.jsonify', 'jsonify', (["{'result': senti, 'pos': sentiment_dict}"], {}), "({'result': senti, 'pos': sentiment_dict})\n", (9292, 9334), False, 'from flask import request, jsonify\n'), ((9654, 9700), 'flask.jsonify', 'jsonify', (["{'result': output[0]['summary_text']}"], {}), "({'result': output[0]['summary_text']})\n", (9661, 9700), False, 'from flask import request, jsonify\n'), ((9791, 9798), 'goose3.Goose', 'Goose', ([], {}), '()\n', (9796, 9798), False, 'from goose3 import Goose\n'), ((10024, 10071), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {'interpolation': '"""bilinear"""'}), "(wordcloud, interpolation='bilinear')\n", (10034, 10071), True, 'import matplotlib.pyplot as plt\n'), ((10076, 10091), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10084, 10091), True, 'import matplotlib.pyplot as plt\n'), ((10096, 10117), 'matplotlib.pyplot.margins', 'plt.margins', ([], {'x': '(0)', 'y': '(0)'}), '(x=0, y=0)\n', (10107, 10117), True, 'import matplotlib.pyplot as plt\n'), ((10355, 10405), 'flask.send_file', 'send_file', (['"""./wordcloud.png"""'], {'mimetype': '"""image/png"""'}), "('./wordcloud.png', mimetype='image/png')\n", (10364, 10405), False, 'from 
flask import send_file\n'), ((10503, 10510), 'goose3.Goose', 'Goose', ([], {}), '()\n', (10508, 10510), False, 'from goose3 import Goose\n'), ((10769, 10787), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (10781, 10787), True, 'import pandas as pd\n'), ((10792, 10851), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""Target"""', 'y': '"""Value"""', 'data': 'df', 'palette': '"""Set2"""'}), "(x='Target', y='Value', data=df, palette='Set2')\n", (10803, 10851), True, 'import seaborn as sns\n'), ((10857, 10910), 'matplotlib.pyplot.title', 'plt.title', (['"""Propagandastic Evaluation of the Article"""'], {}), "('Propagandastic Evaluation of the Article')\n", (10866, 10910), True, 'import matplotlib.pyplot as plt\n'), ((11085, 11114), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""propaganda.png"""'], {}), "('propaganda.png')\n", (11096, 11114), True, 'import matplotlib.pyplot as plt\n'), ((11352, 11403), 'flask.send_file', 'send_file', (['"""./propaganda.png"""'], {'mimetype': '"""image/png"""'}), "('./propaganda.png', mimetype='image/png')\n", (11361, 11403), False, 'from flask import send_file\n'), ((12085, 12198), 'llama_index.GPTVectorStoreIndex.from_documents', 'llama_index.GPTVectorStoreIndex.from_documents', (['(documents1 + documents + documents2 + documents3 + documents4)'], {}), '(documents1 + documents +\n documents2 + documents3 + documents4)\n', (12131, 12198), False, 'import llama_index\n'), ((12380, 12411), 'flask.jsonify', 'jsonify', (["{'response': response}"], {}), "({'response': response})\n", (12387, 12411), False, 'from flask import request, jsonify\n'), ((12495, 12502), 'goose3.Goose', 'Goose', ([], {}), '()\n', (12500, 12502), False, 'from goose3 import Goose\n'), ((12584, 12612), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (12610, 12612), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((13057, 13105), 'flask.jsonify', 'jsonify', (["{'sentiment': category, 'verdict': res}"], {}), "({'sentiment': category, 'verdict': res})\n", (13064, 13105), False, 'from flask import request, jsonify\n'), ((14023, 14041), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (14035, 14041), True, 'import pandas as pd\n'), ((14051, 14110), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""Target"""', 'y': '"""Value"""', 'data': 'df', 'palette': '"""Set2"""'}), "(x='Target', y='Value', data=df, palette='Set2')\n", (14062, 14110), True, 'import seaborn as sns\n'), ((14116, 14176), 'matplotlib.pyplot.title', 'plt.title', (['"""Hate Speech Params Detection present in Article"""'], {}), "('Hate Speech Params Detection present in Article')\n", (14125, 14176), True, 'import matplotlib.pyplot as plt\n'), ((14351, 14374), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""hate.png"""'], {}), "('hate.png')\n", (14362, 14374), True, 'import matplotlib.pyplot as plt\n'), ((14612, 14657), 'flask.send_file', 'send_file', (['"""./hate.png"""'], {'mimetype': '"""image/png"""'}), "('./hate.png', mimetype='image/png')\n", (14621, 14657), False, 'from flask import send_file\n'), ((14805, 14833), 'pandas.read_csv', 'pd.read_csv', (['"""blacklist.csv"""'], {}), "('blacklist.csv')\n", (14816, 14833), True, 'import pandas as pd\n'), ((3175, 3203), 'os.getenv', 'os.getenv', (['"""SERPAPI_API_KEY"""'], {}), "('SERPAPI_API_KEY')\n", (3184, 3203), False, 'import os\n'), ((5770, 5798), 'os.getenv', 'os.getenv', (['"""HUGGINGFACE_API"""'], {}), "('HUGGINGFACE_API')\n", (5779, 5798), False, 
'import os\n'), ((9432, 9439), 'goose3.Goose', 'Goose', ([], {}), '()\n', (9437, 9439), False, 'from goose3 import Goose\n'), ((11741, 11766), 'os.getenv', 'os.getenv', (['"""BEARER_TOKEN"""'], {}), "('BEARER_TOKEN')\n", (11750, 11766), False, 'import os\n'), ((2142, 2180), 'snscrape.modules.twitter.TwitterSearchScraper', 'snstwitter.TwitterSearchScraper', (['query'], {}), '(query)\n', (2173, 2180), True, 'import snscrape.modules.twitter as snstwitter\n'), ((5541, 5580), 'snscrape.modules.twitter.TwitterProfileScraper', 'snstwitter.TwitterProfileScraper', (['query'], {}), '(query)\n', (5573, 5580), True, 'import snscrape.modules.twitter as snstwitter\n'), ((6654, 6692), 'snscrape.modules.twitter.TwitterSearchScraper', 'snstwitter.TwitterSearchScraper', (['query'], {}), '(query)\n', (6685, 6692), True, 'import snscrape.modules.twitter as snstwitter\n'), ((6926, 6954), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (6952, 6954), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((9881, 9942), 'wordcloud.WordCloud', 'WordCloud', ([], {'width': '(1280)', 'height': '(853)', 'margin': '(0)', 'colormap': '"""Blues"""'}), "(width=1280, height=853, margin=0, colormap='Blues')\n", (9890, 9942), False, 'from wordcloud import WordCloud, STOPWORDS\n'), ((15185, 15221), 'snscrape.modules.twitter.TwitterSearchScraper', 'snstwitter.TwitterSearchScraper', (['url'], {}), '(url)\n', (15216, 15221), True, 'import snscrape.modules.twitter as snstwitter\n')] |
import sqlite3
import pandas as pd
import llama_index
import os
import openai
from IPython.display import Markdown, display
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
text
)
from llama_index import SQLDatabase, ServiceContext
from llama_index.llms import OpenAI
from llama_index.indices.struct_store.sql_query import NLSQLTableQueryEngine
# Create a new SQLite database (or connect to an existing one)
def create_and_load_db():
# Connect to the SQLite database (or create a new one)
conn = sqlite3.connect('company_info.db')
# Read the CSV file into a Pandas DataFrame
df = pd.read_csv('C:\\Users\\Nsahni\\Downloads\\Github\\Bynd\\company_information_db.csv')
# Write the data to a SQLite table
df.to_sql('company_table', conn, if_exists='replace', index=False)
return conn
def execute_query(conn, query):
# Query the table
query_result = pd.read_sql_query(query, conn)
print(query_result)
conn = create_and_load_db()
with open('config.txt', 'r') as f:
openai.api_key = f.read().strip()
llm = OpenAI(temperature=0, model="gpt-4")
engine = create_engine('sqlite:///company_info.db')
metadata_obj = MetaData()
metadata_obj.create_all(engine)
service_context = ServiceContext.from_defaults(llm=llm)
sql_database = SQLDatabase(engine, include_tables=['company_table'])
metadata_obj = MetaData()
# with engine.connect() as con:
# rows = con.execute(text("SELECT * FROM company_table where market_cap > 10000000"))
# for row in rows:
# print(row)
query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["company_table"],
)
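# Illustrative natural-language questions this engine can translate to SQL
# (hypothetical phrasing; actual column names come from the CSV, e.g. market_cap):
#   "Which companies have a market cap above 10000000?"
#   "List the 10 companies with the largest market cap."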
query_str = input("Please enter the query you are looking for: ")
response = query_engine.query(query_str)
# response_df = pd.DataFrame(response)
print(response)
# print(response)
# execute_query(conn, "SELECT * FROM company_table limit 10")
# Close the connection
conn.close() | [
"llama_index.llms.OpenAI",
"llama_index.ServiceContext.from_defaults",
"llama_index.SQLDatabase",
"llama_index.indices.struct_store.sql_query.NLSQLTableQueryEngine"
] | [((1122, 1158), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-4"""'}), "(temperature=0, model='gpt-4')\n", (1128, 1158), False, 'from llama_index.llms import OpenAI\n'), ((1168, 1210), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///company_info.db"""'], {}), "('sqlite:///company_info.db')\n", (1181, 1210), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer, select, text\n'), ((1226, 1236), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (1234, 1236), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer, select, text\n'), ((1287, 1324), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (1315, 1324), False, 'from llama_index import SQLDatabase, ServiceContext\n'), ((1340, 1393), 'llama_index.SQLDatabase', 'SQLDatabase', (['engine'], {'include_tables': "['company_table']"}), "(engine, include_tables=['company_table'])\n", (1351, 1393), False, 'from llama_index import SQLDatabase, ServiceContext\n'), ((1409, 1419), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (1417, 1419), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer, select, text\n'), ((1604, 1678), 'llama_index.indices.struct_store.sql_query.NLSQLTableQueryEngine', 'NLSQLTableQueryEngine', ([], {'sql_database': 'sql_database', 'tables': "['company_table']"}), "(sql_database=sql_database, tables=['company_table'])\n", (1625, 1678), False, 'from llama_index.indices.struct_store.sql_query import NLSQLTableQueryEngine\n'), ((578, 612), 'sqlite3.connect', 'sqlite3.connect', (['"""company_info.db"""'], {}), "('company_info.db')\n", (593, 612), False, 'import sqlite3\n'), ((671, 761), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Nsahni\\\\Downloads\\\\Github\\\\Bynd\\\\company_information_db.csv"""'], {}), "(\n 'C:\\\\Users\\\\Nsahni\\\\Downloads\\\\Github\\\\Bynd\\\\company_information_db.csv')\n", (682, 761), True, 'import pandas as pd\n'), ((958, 988), 'pandas.read_sql_query', 'pd.read_sql_query', (['query', 'conn'], {}), '(query, conn)\n', (975, 988), True, 'import pandas as pd\n')] |
import os
from typing import Optional, Dict
import openai
import pandas as pd
from langchain.llms import OpenAI
import llama_index
from llama_index.readers.schema.base import Document
from llama_index import SimpleWebPageReader, QuestionAnswerPrompt
from llama_index import ServiceContext, StorageContext, load_index_from_storage
from llama_index import LLMPredictor, OpenAIEmbedding
from llama_index.indices.vector_store.base import VectorStore
from mindsdb.integrations.libs.base import BaseMLEngine
from mindsdb.utilities.config import Config
def _validate_prompt_template(prompt_template: str):
if '{context_str}' not in prompt_template or '{query_str}' not in prompt_template:
raise Exception(
"Provided prompt template is invalid, missing `{context_str}`, `{query_str}`. Please ensure both placeholders are present and try again.") # noqa
class LlamaIndexHandler(BaseMLEngine):
""" Integration with the LlamaIndex data framework for LLM applications. """
name = 'llama_index'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generative = True
self.default_index_class = 'GPTVectorStoreIndex'
self.supported_index_class = ['GPTVectorStoreIndex']
self.default_reader = 'DFReader'
self.supported_reader = ['DFReader', 'SimpleWebPageReader']
@staticmethod
def create_validation(target, args=None, **kwargs):
if 'prompt_template' in args['using']:
_validate_prompt_template(args['using']['prompt_template'])
if args['using'].get('mode') == 'conversational':
for param in ('user_column', 'assistant_column'):
if param not in args['using']:
raise Exception(f'Conversational mode requires {param} parameter')
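    # Illustrative `args` accepted by this handler (values are hypothetical):
    #   {
    #       "using": {
    #           "reader": "DFReader",                 # or "SimpleWebPageReader" + "source_url_link"
    #           "index_class": "GPTVectorStoreIndex",
    #           "input_column": "question",
    #           "prompt_template": "We have provided context information below.\n"
    #                              "{context_str}\n"
    #                              "Given this information, please answer: {query_str}",
    #           "mode": "conversational",             # requires user_column & assistant_column
    #           "user_column": "question",
    #           "assistant_column": "answer",
    #       }
    #   }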
def create(self, target: str, df: Optional[pd.DataFrame] = None, args: Optional[Dict] = None) -> None:
if 'using' not in args:
raise Exception("LlamaIndex engine requires a USING clause! Refer to its documentation for more details.")
if 'index_class' not in args['using']:
args['using']['index_class'] = self.default_index_class
elif args['using']['index_class'] not in self.supported_index_class:
raise Exception(f"Invalid index class argument. Please use one of {self.supported_index_class}")
if 'reader' not in args['using']:
args['using']['reader'] = self.default_reader
elif args['using']['reader'] not in self.supported_reader:
raise Exception(f"Invalid operation mode. Please use one of {self.supported_reader}")
# workaround to create llama model without input data
if df is None or df.empty:
df = pd.DataFrame([{'text': ''}])
if args['using']['reader'] == 'DFReader':
dstrs = df.apply(lambda x: ', '.join([f'{col}: {str(entry)}' for col, entry in zip(df.columns, x)]), axis=1)
reader = list(map(lambda x: Document(x), dstrs.tolist()))
elif args['using']['reader'] == 'SimpleWebPageReader':
if 'source_url_link' not in args['using']:
raise Exception("SimpleWebPageReader requires a `source_url_link` parameter. Refer to LlamaIndex documentation for more details.") # noqa
reader = SimpleWebPageReader(html_to_text=True).load_data([args['using']['source_url_link']])
else:
raise Exception(f"Invalid operation mode. Please use one of {self.supported_reader}.")
self.model_storage.json_set('args', args)
index = self._setup_index(reader)
path = self.model_storage.folder_get('context')
index.storage_context.persist(persist_dir=path)
self.model_storage.folder_sync('context')
def update(self, args) -> None:
prompt_template = args['using'].get('prompt_template', args.get('prompt_template', None))
if prompt_template is not None:
_validate_prompt_template(prompt_template)
args_cur = self.model_storage.json_get('args')
args_cur['using'].update(args['using'])
# check new set of arguments
self.create_validation(None, args_cur)
self.model_storage.json_set('args', args_cur)
def predict(self, df: Optional[pd.DataFrame] = None, args: Optional[Dict] = None) -> pd.DataFrame:
pred_args = args['predict_params'] if args else {}
args = self.model_storage.json_get('args')
engine_kwargs = {}
if args['using'].get('mode') == 'conversational':
user_column = args['using']['user_column']
assistant_column = args['using']['assistant_column']
messages = []
for row in df[:-1].to_dict('records'):
messages.append(f'user: {row[user_column]}')
messages.append(f'assistant: {row[assistant_column]}')
conversation = '\n'.join(messages)
questions = [
df.iloc[-1][user_column]
]
if 'prompt' in pred_args and pred_args['prompt'] is not None:
user_prompt = pred_args['prompt']
else:
user_prompt = args['using'].get('prompt', '')
prompt_template = f'{user_prompt}\n'\
f'---------------------\n' \
f'We have provided context information below. \n' \
f'{{context_str}}\n' \
f'---------------------\n' \
f'This is previous conversation history:\n' \
f'{conversation}\n' \
f'---------------------\n' \
f'Given this information, please answer the question: {{query_str}}'
engine_kwargs['text_qa_template'] = QuestionAnswerPrompt(prompt_template)
else:
input_column = args['using'].get('input_column', None)
prompt_template = args['using'].get('prompt_template', args.get('prompt_template', None))
if prompt_template is not None:
_validate_prompt_template(prompt_template)
engine_kwargs['text_qa_template'] = QuestionAnswerPrompt(prompt_template)
if input_column is None:
raise Exception(f'`input_column` must be provided at model creation time or through USING clause when predicting. Please try again.') # noqa
if input_column not in df.columns:
raise Exception(f'Column "{input_column}" not found in input data! Please try again.')
questions = df[input_column]
index_path = self.model_storage.folder_get('context')
storage_context = StorageContext.from_defaults(persist_dir=index_path)
service_context = self._get_service_context()
index = load_index_from_storage(storage_context, service_context=service_context)
query_engine = index.as_query_engine(**engine_kwargs)
results = []
for question in questions:
query_results = query_engine.query(question) # TODO: provide extra_info in explain_target col
results.append(query_results.response)
result_df = pd.DataFrame({'question': questions, args['target']: results}) # result_df['answer'].tolist()
return result_df
def _get_service_context(self):
args = self.model_storage.json_get('args')
openai_api_key = self._get_llama_index_api_key(args['using'])
openai.api_key = openai_api_key # TODO: shouldn't have to do this! bug?
llm_kwargs = {
'openai_api_key': openai_api_key
}
if 'temperature' in args['using']:
llm_kwargs['temperature'] = args['using']['temperature']
if 'model_name' in args['using']:
llm_kwargs['model_name'] = args['using']['model_name']
if 'max_tokens' in args['using']:
llm_kwargs['max_tokens'] = args['using']['max_tokens']
llm = OpenAI(**llm_kwargs) # TODO: all usual params should go here
embed_model = OpenAIEmbedding(openai_api_key=openai_api_key)
service_context = ServiceContext.from_defaults(
llm_predictor=LLMPredictor(llm=llm),
embed_model=embed_model
)
return service_context
def _setup_index(self, documents):
args = self.model_storage.json_get('args')
indexer: VectorStore = getattr(llama_index, args['using']['index_class'])
index = indexer.from_documents(documents, service_context=self._get_service_context())
return index
def _get_llama_index_api_key(self, args, strict=True):
"""
API_KEY preference order:
1. provided at model creation
2. provided at engine creation
3. OPENAI_API_KEY env variable
4. llama_index.OPENAI_API_KEY setting in config.json
Note: method is not case sensitive.
"""
key = 'OPENAI_API_KEY'
for k in key, key.lower():
# 1
if args.get(k):
return args[k]
# 2
connection_args = self.engine_storage.get_connection_args()
if k in connection_args:
return connection_args[k]
# 3
api_key = os.getenv(k)
if api_key is not None:
return api_key
# 4
config = Config()
openai_cfg = config.get('llama_index', {})
if k in openai_cfg:
return openai_cfg[k]
if strict:
raise Exception(f'Missing API key "{k}". Either re-create this ML_ENGINE specifying the `{k}` parameter, or re-create this model and pass the API key with `USING` syntax.') # noqa
| [
"llama_index.LLMPredictor",
"llama_index.OpenAIEmbedding",
"llama_index.QuestionAnswerPrompt",
"llama_index.StorageContext.from_defaults",
"llama_index.readers.schema.base.Document",
"llama_index.load_index_from_storage",
"llama_index.SimpleWebPageReader"
] | [((6636, 6688), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'index_path'}), '(persist_dir=index_path)\n', (6664, 6688), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((6759, 6832), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (6782, 6832), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((7132, 7194), 'pandas.DataFrame', 'pd.DataFrame', (["{'question': questions, args['target']: results}"], {}), "({'question': questions, args['target']: results})\n", (7144, 7194), True, 'import pandas as pd\n'), ((7914, 7934), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '(**llm_kwargs)\n', (7920, 7934), False, 'from langchain.llms import OpenAI\n'), ((7998, 8044), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'openai_api_key': 'openai_api_key'}), '(openai_api_key=openai_api_key)\n', (8013, 8044), False, 'from llama_index import LLMPredictor, OpenAIEmbedding\n'), ((2754, 2782), 'pandas.DataFrame', 'pd.DataFrame', (["[{'text': ''}]"], {}), "([{'text': ''}])\n", (2766, 2782), True, 'import pandas as pd\n'), ((5742, 5779), 'llama_index.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['prompt_template'], {}), '(prompt_template)\n', (5762, 5779), False, 'from llama_index import SimpleWebPageReader, QuestionAnswerPrompt\n'), ((9223, 9235), 'os.getenv', 'os.getenv', (['k'], {}), '(k)\n', (9232, 9235), False, 'import os\n'), ((9340, 9348), 'mindsdb.utilities.config.Config', 'Config', ([], {}), '()\n', (9346, 9348), False, 'from mindsdb.utilities.config import Config\n'), ((6120, 6157), 'llama_index.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['prompt_template'], {}), '(prompt_template)\n', (6140, 6157), False, 'from llama_index import SimpleWebPageReader, QuestionAnswerPrompt\n'), ((8127, 8148), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (8139, 8148), False, 'from llama_index import LLMPredictor, OpenAIEmbedding\n'), ((2995, 3006), 'llama_index.readers.schema.base.Document', 'Document', (['x'], {}), '(x)\n', (3003, 3006), False, 'from llama_index.readers.schema.base import Document\n'), ((3321, 3359), 'llama_index.SimpleWebPageReader', 'SimpleWebPageReader', ([], {'html_to_text': '(True)'}), '(html_to_text=True)\n', (3340, 3359), False, 'from llama_index import SimpleWebPageReader, QuestionAnswerPrompt\n')] |
"""
This script is used to summarize conversations from Zendesk support tickets.
It reads text files containing comments from the ticket and generates a summary
that includes information about the participants, problems raised, key events,
current status of the ticket, and log lines from the messages.
The script uses the `Gemini` model from the `llama_index` package to generate the summary.
The summary is saved in a text file for each ticket.
Usage:
- Modify the `MODEL` variable to specify the desired model for summarization.
- Run the script to generate summaries for the tickets.
Note: This script requires the `llama_index` package to be installed.
"""
import os
import glob
import time
import llama_index
from llama_index.core import ServiceContext
from llama_index.llms.gemini import Gemini
from llama_index.core import SimpleDirectoryReader
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.evaluation import FaithfulnessEvaluator
MODEL = "Gemini"
DATA_DIR = "data"
SUMMARY_ROOT = "structured.summaries"
SUMMARY_DIR = os.path.join(SUMMARY_ROOT, MODEL).replace(":", "_")
os.makedirs(SUMMARY_DIR, exist_ok=True)
def saveText(path, text):
"Save the given text to a file at the specified path."
with open(path, "w") as f:
f.write(text)
def commentPaths(ticketNumber):
"Returns a sorted list of file paths for the comments in Zendesk ticket `ticketNumber`."
ticketDir = os.path.join(DATA_DIR, ticketNumber)
return sorted(glob.glob(os.path.join(ticketDir, "*.txt")))
def summaryPath(ticketNumber):
"Returns the file path for where we store the summary of Zendesk ticket `ticketNumber`."
return os.path.join(SUMMARY_DIR, f"{ticketNumber}.txt")
def totalSizeKB(paths):
"Returns the total size in kilobytes of the files specified by `paths`."
return sum(os.path.getsize(path) for path in paths) / 1024
def currentTime():
"Returns the current time in the format 'dd/mm/YYYY HH:MM:SS'."
from datetime import datetime
now = datetime.now()
return now.strftime("%d/%m/%Y %H:%M:%S")
llm = Gemini()
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
summarizer = TreeSummarize(service_context=service_context, verbose=False)
# evaluator = FaithfulnessEvaluator(llm=llm)
COMPANY = "PaperCut"
BASE_PROMPT = f"The following text is a series of messages from a {COMPANY} support ticket."
def makePrompt(text):
return f"{BASE_PROMPT}\n{text}"
QUESTION_DETAIL = [
("Summary", "Summarise the whole conversation in one sentence."),
("Problems", """List the problems raised in the ticket.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Problems are issues that need to be resolved, such as a bug, a feature request.
Questions about how to use the product are not problems.
Responses to problems are not problems.
Each problem should be a single sentence describing the problem.
When there is no problem, don't write a line.
If there are multiple problems, order them by importance, most important first."""),
("Status", """What is the current status of the ticket?
Is it open, closed, or pending?
If it is closed, what was the resolution?
If it is pending, what is the next action?
If it is open, what is the current problem?
Do not include any other information in this answer.
Your answer should be one sentence for status and optionally one sentence for the resolution or next action.
"""),
("Participants", """List the participants and who they work for.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Use the format: 'Name: Company.'
List the customer first and {COMPANY} staff last.
"""),
("Events", """List the key events and the date they occurred.
An event is something that happens, such as a problem being reported, a solution being proposed, or a resolution being reached.
Don't include contacts, responses, or other non-events.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Questions about how to use the product are not events.
Responses to problems are not events.
Log lines are not events.
When there is no event, don't write a line.
Use the format: 'Date: Event.'
Format the date as 'YYYY-MM-DD'.
Order the list by date, earliest first."""),
("Logs", """List all the log lines from the messages.
Use a numbered list.
Order the list by date, earliest first.
Don't add a prologue or epilogue to the list.
When there is no log line, don't write a line.
Write the full log line.
Log lines are lines that start with a date and a status such as INFO, WARN, DEBUG or ERROR.
Example: 2022-01-27 13:31:43,628 WARN
Example: 2022-01-26 12:40:18,380 DEBUG ClientManagerImpl
Example: ERROR | wrapper | 2022/01/27 13:30:58 | JVM exited unexpectedly. """),
]
QUESTIONS = [question for question, _ in QUESTION_DETAIL]
QUESTION_PROMPT = {short: makePrompt(detail) for (short, detail) in QUESTION_DETAIL}
def makeAnswer(question, answer):
question = f"{question.upper()}:"
return f"{question:13} -------------------------------------------------------------*\n{answer}"
def summariseTicket(ticketNumber):
"""Summarizes the ticket `ticketNumber` by generating answers to a set of predefined questions.
Returns: Structured text containing the answers to each of the questions based on the
comments in the ticket.
"""
t0 = time.time()
input_files = commentPaths(ticketNumber)
reader = SimpleDirectoryReader(input_files=input_files)
docs = reader.load_data()
texts = [doc.text for doc in docs]
print(f"Loaded {len(texts)} comments in {time.time() - t0:.2f} seconds")
questionAnswer = {}
for question in reversed(QUESTIONS):
t0 = time.time()
prompt = QUESTION_PROMPT[question]
answer = summarizer.get_response(prompt, texts)
questionAnswer[question] = answer.strip()
print(f"{time.time() - t0:5.2f} seconds to answer {question}")
return "\n\n".join(makeAnswer(question, questionAnswer[question]) for question in QUESTIONS)
#
# Test case.
#
# ['1259693', '1216136', '1196141', '1260221', '1116722', '1280919']
# 0: 1259693 7 comments 2.888 kb
# 1: 1216136 26 comments 20.715 kb
# 2: 1196141 122 comments 81.527 kb
# 3: 1260221 106 comments 126.619 kb
# 4: 1116722 288 comments 190.168 kb
# 5: 1280919 216 comments 731.220 kb
MAX_SIZE = 100 # Maximum size of ticket comments in kilobytes.
if __name__ == "__main__":
import time
print(f"MODEL={MODEL}")
ticketNumbers = sorted(os.path.basename(path) for path in glob.glob(os.path.join(DATA_DIR, "*")))
ticketNumbers.sort(key=lambda k: (totalSizeKB(commentPaths(k)), k))
# ticketNumbers = ticketNumbers[:2]
ticketNumbers = [k for k in ticketNumbers if totalSizeKB(commentPaths(k)) < MAX_SIZE]
print(ticketNumbers)
for i, ticketNumber in enumerate(ticketNumbers):
paths = commentPaths(ticketNumber)
print(f"{i:4}: {ticketNumber:8} {len(paths):3} comments {totalSizeKB(paths):7.3f} kb")
# ticketNumbers = ticketNumbers[:1]
t00 = time.time()
summaries = {}
durations = {}
commentCounts = {}
commentSizes = {}
for i, ticketNumber in enumerate(ticketNumbers):
commentCount = len(commentPaths(ticketNumber))
commentSize = totalSizeKB(commentPaths(ticketNumber))
print(f"{i:2}: ticketNumber={ticketNumber:8} {commentCount:3} comments {commentSize:7.3f} kb {currentTime()}",
flush=True)
if os.path.exists(summaryPath(ticketNumber)):
print(f" skipping ticket {ticketNumber}", flush=True)
continue # Skip tickets that have already been summarised.
t0 = time.time()
summary = summariseTicket(ticketNumber)
duration = time.time() - t0
description = f"{commentCount} comments {commentSize:7.3f} kb {duration:5.2f} sec summary={len(summary)}"
print(f" {description}", flush=True)
with open(summaryPath(ticketNumber), "w") as f:
print(f"Summary: ticket {ticketNumber}: {description} -------------------------", file=f)
print(summary, file=f)
summaries[ticketNumber] = summary
durations[ticketNumber] = duration
commentCounts[ticketNumber] = commentCount
commentSizes[ticketNumber] = commentSize
duration = time.time() - t00
print("====================^^^====================")
print(f"Duration: {duration:.2f} seconds")
for i, ticketNumber in enumerate(ticketNumbers):
        if ticketNumber not in durations:
            continue  # Skipped above: already summarised in an earlier run.
commentCount = commentCounts[ticketNumber]
commentSize = totalSizeKB(commentPaths(ticketNumber))
duration = durations[ticketNumber]
print(f"{i:2}: {ticketNumber:8}: {commentCount:3} comments {commentSize:7.3f} kb {duration:5.2f} seconds")
| [
"llama_index.core.response_synthesizers.TreeSummarize",
"llama_index.llms.gemini.Gemini",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.ServiceContext.from_defaults"
] | [((1113, 1152), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIR'], {'exist_ok': '(True)'}), '(SUMMARY_DIR, exist_ok=True)\n', (1124, 1152), False, 'import os\n'), ((2083, 2091), 'llama_index.llms.gemini.Gemini', 'Gemini', ([], {}), '()\n', (2089, 2091), False, 'from llama_index.llms.gemini import Gemini\n'), ((2110, 2168), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (2138, 2168), False, 'from llama_index.core import ServiceContext\n'), ((2182, 2243), 'llama_index.core.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context', 'verbose': '(False)'}), '(service_context=service_context, verbose=False)\n', (2195, 2243), False, 'from llama_index.core.response_synthesizers import TreeSummarize\n'), ((1434, 1470), 'os.path.join', 'os.path.join', (['DATA_DIR', 'ticketNumber'], {}), '(DATA_DIR, ticketNumber)\n', (1446, 1470), False, 'import os\n'), ((1670, 1718), 'os.path.join', 'os.path.join', (['SUMMARY_DIR', 'f"""{ticketNumber}.txt"""'], {}), "(SUMMARY_DIR, f'{ticketNumber}.txt')\n", (1682, 1718), False, 'import os\n'), ((2016, 2030), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2028, 2030), False, 'from datetime import datetime\n'), ((5369, 5380), 'time.time', 'time.time', ([], {}), '()\n', (5378, 5380), False, 'import time\n'), ((5439, 5485), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'input_files'}), '(input_files=input_files)\n', (5460, 5485), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((7081, 7092), 'time.time', 'time.time', ([], {}), '()\n', (7090, 7092), False, 'import time\n'), ((1060, 1093), 'os.path.join', 'os.path.join', (['SUMMARY_ROOT', 'MODEL'], {}), '(SUMMARY_ROOT, MODEL)\n', (1072, 1093), False, 'import os\n'), ((5710, 5721), 'time.time', 'time.time', ([], {}), '()\n', (5719, 5721), False, 'import time\n'), ((7704, 7715), 'time.time', 'time.time', ([], {}), '()\n', (7713, 7715), False, 'import time\n'), ((8357, 8368), 'time.time', 'time.time', ([], {}), '()\n', (8366, 8368), False, 'import time\n'), ((1499, 1531), 'os.path.join', 'os.path.join', (['ticketDir', '"""*.txt"""'], {}), "(ticketDir, '*.txt')\n", (1511, 1531), False, 'import os\n'), ((6537, 6559), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (6553, 6559), False, 'import os\n'), ((7783, 7794), 'time.time', 'time.time', ([], {}), '()\n', (7792, 7794), False, 'import time\n'), ((1836, 1857), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (1851, 1857), False, 'import os\n'), ((5600, 5611), 'time.time', 'time.time', ([], {}), '()\n', (5609, 5611), False, 'import time\n'), ((6582, 6609), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""*"""'], {}), "(DATA_DIR, '*')\n", (6594, 6609), False, 'import os\n'), ((5888, 5899), 'time.time', 'time.time', ([], {}), '()\n', (5897, 5899), False, 'import time\n')] |
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end-to-end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.legacy.bridge.pydantic import BaseModel # type: ignore
from llama_index.legacy.callbacks.schema import CBEventType, EventPayload
from llama_index.legacy.core.response.schema import Response
from llama_index.legacy.indices.query.schema import QueryBundle
from llama_index.legacy.prompts.mixin import PromptDictType
from llama_index.legacy.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.legacy.types import RESPONSE_TEXT_TYPE
from llama_index.legacy.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
            responder = GoogleTextSynthesizer.from_defaults(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
            the grounded response. These passages will always have no score;
            that is the only way to mark them as attributed passages. Then, the list
will follow with the originally provided passages, which will have
a score from the retrieval.
            Response's `metadata` may also have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
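# Hedged usage sketch (not part of the original module). It assumes the
# `google.generativeai` package is installed and credentials are configured for
# the Semantic Retriever / AQA API; the passages and the query below are
# made-up placeholders.
def _example_usage() -> None:
    synthesizer = GoogleTextSynthesizer.from_defaults(temperature=0.7)
    nodes = [
        NodeWithScore(node=TextNode(text="The store opens at 9am."), score=0.9),
        NodeWithScore(node=TextNode(text="The store closes at 5pm."), score=0.8),
    ]
    response = synthesizer.synthesize("When does the store open?", nodes=nodes)
    # The grounded answer plus the attributed passages and answerability estimate.
    print(response.response)
    print(response.metadata.get("answerable_probability"))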
| [
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.indices.query.schema.QueryBundle",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service",
"llama_index.legacy.core.response.schema.Response"
] | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((2809, 2842), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2840, 2842), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4901, 4950), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4905, 4950), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6844, 6870), 'llama_index.legacy.core.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6852, 6870), False, 'from llama_index.legacy.core.response.schema import Response\n'), ((6927, 6955), 'llama_index.legacy.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6938, 6955), False, 'from llama_index.legacy.indices.query.schema import QueryBundle\n'), ((8366, 8388), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8374, 8388), False, 'from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode\n')] |
# You can find this code for Chainlit python streaming here (https://docs.chainlit.io/concepts/streaming/python)
import os
import requests
import chainlit as cl
from dotenv import load_dotenv
import llama_index
from llama_index.core import set_global_handler
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
from llama_index.core import (
SimpleDirectoryReader,
load_index_from_storage,
VectorStoreIndex,
StorageContext,
)
from llama_index.vector_stores.faiss import FaissVectorStore
import faiss
# ChatOpenAI Templates
system_template = """You are a helpful assistant who always speaks in a pleasant tone!
"""
user_template = """{input}
Think through your response step by step.
"""
# query_engine = index.as_query_engine()
# response = query_engine.query("Who is the E-VP, Operations - and how old are they?")
# print(response.response)
#
# response = query_engine.query("What is the gross carrying amount of Total Amortizable Intangible Assets for Jan 29, 2023?")
# print(response.response)
# If the storage folder exists and is not empty, load the index from it; otherwise build it from the source documents.
@cl.on_chat_start
async def start_chat():
load_dotenv()
set_global_handler("wandb", run_args={"project": "aie1-llama-index-middleterm"})
wandb_callback = llama_index.core.global_handler
Settings.llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
index = None
if os.path.exists("./storage") and os.listdir("./storage"):
vector_store = FaissVectorStore.from_persist_dir("./storage")
storage_context = StorageContext.from_defaults(
vector_store=vector_store, persist_dir="./storage"
)
index = load_index_from_storage(storage_context=storage_context)
else:
with requests.get('https://d18rn0p25nwr6d.cloudfront.net/CIK-0001045810/1cbe8fe7-e08a-46e3-8dcc-b429fc06c1a4.pdf',
stream=True) as r:
r.raise_for_status() # Raises a HTTPError if the response status code is 4XX/5XX
os.makedirs(os.path.dirname('nvidia_data/paper.pdf'), exist_ok=True)
with open('nvidia_data/paper.pdf', 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
documents = SimpleDirectoryReader('nvidia_data/').load_data()
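        # 1536 is the embedding dimensionality of OpenAI's text-embedding-3-small model.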
faiss_index = faiss.IndexFlatL2(1536)
storage_context = StorageContext.from_defaults(vector_store=FaissVectorStore(faiss_index=faiss_index))
index = VectorStoreIndex.from_documents(
documents,
storage_context=storage_context,
persist_dir="./storage"
)
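        # Note (assumption): to make the "./storage" branch above usable on a later run,
        # the index typically has to be persisted explicitly, e.g.
        # index.storage_context.persist(persist_dir="./storage").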
cl.user_session.set("wandb_callback", wandb_callback)
cl.user_session.set("query_engine", index.as_query_engine())
@cl.on_message
async def main(message: cl.Message):
Settings.callback_manager = cl.user_session.get("wandb_callback")
query_engine = cl.user_session.get("query_engine")
template = (f"You are a helpful assistant who always speaks in a pleasant tone! responds to user input with a step by step guide using this context: {message.content} input: {input}")
response = query_engine.query(template)
response_message = cl.Message(content="")
for token in response.response:
await response_message.stream_token(token=token)
await response_message.send()
@cl.on_stop
def on_stop():
print("The user wants to stop the task!")
cl.user_session.get("wandb_callback").finish()
@cl.on_chat_end
def on_chat_end():
print("The user disconnected!")
cl.user_session.get("wandb_callback").finish() | [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.set_global_handler",
"llama_index.vector_stores.faiss.FaissVectorStore.from_persist_dir",
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.openai.OpenAI",
"llama_index.vector_stores.faiss.FaissVectorStore",
"llama_index.core.load_index_from_storage",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((1242, 1255), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1253, 1255), False, 'from dotenv import load_dotenv\n'), ((1261, 1346), 'llama_index.core.set_global_handler', 'set_global_handler', (['"""wandb"""'], {'run_args': "{'project': 'aie1-llama-index-middleterm'}"}), "('wandb', run_args={'project': 'aie1-llama-index-middleterm'}\n )\n", (1279, 1346), False, 'from llama_index.core import set_global_handler\n'), ((1415, 1461), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0.1)', 'model': '"""gpt-3.5-turbo"""'}), "(temperature=0.1, model='gpt-3.5-turbo')\n", (1421, 1461), False, 'from llama_index.llms.openai import OpenAI\n'), ((1489, 1536), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-3-small"""'}), "(model='text-embedding-3-small')\n", (1504, 1536), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((2795, 2848), 'chainlit.user_session.set', 'cl.user_session.set', (['"""wandb_callback"""', 'wandb_callback'], {}), "('wandb_callback', wandb_callback)\n", (2814, 2848), True, 'import chainlit as cl\n'), ((3000, 3037), 'chainlit.user_session.get', 'cl.user_session.get', (['"""wandb_callback"""'], {}), "('wandb_callback')\n", (3019, 3037), True, 'import chainlit as cl\n'), ((3057, 3092), 'chainlit.user_session.get', 'cl.user_session.get', (['"""query_engine"""'], {}), "('query_engine')\n", (3076, 3092), True, 'import chainlit as cl\n'), ((3349, 3371), 'chainlit.Message', 'cl.Message', ([], {'content': '""""""'}), "(content='')\n", (3359, 3371), True, 'import chainlit as cl\n'), ((1562, 1589), 'os.path.exists', 'os.path.exists', (['"""./storage"""'], {}), "('./storage')\n", (1576, 1589), False, 'import os\n'), ((1594, 1617), 'os.listdir', 'os.listdir', (['"""./storage"""'], {}), "('./storage')\n", (1604, 1617), False, 'import os\n'), ((1642, 1688), 'llama_index.vector_stores.faiss.FaissVectorStore.from_persist_dir', 'FaissVectorStore.from_persist_dir', (['"""./storage"""'], {}), "('./storage')\n", (1675, 1688), False, 'from llama_index.vector_stores.faiss import FaissVectorStore\n'), ((1715, 1800), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store', 'persist_dir': '"""./storage"""'}), "(vector_store=vector_store, persist_dir='./storage'\n )\n", (1743, 1800), False, 'from llama_index.core import SimpleDirectoryReader, load_index_from_storage, VectorStoreIndex, StorageContext\n'), ((1834, 1890), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context'}), '(storage_context=storage_context)\n', (1857, 1890), False, 'from llama_index.core import SimpleDirectoryReader, load_index_from_storage, VectorStoreIndex, StorageContext\n'), ((2492, 2515), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['(1536)'], {}), '(1536)\n', (2509, 2515), False, 'import faiss\n'), ((2643, 2747), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'persist_dir': '"""./storage"""'}), "(documents, storage_context=storage_context,\n persist_dir='./storage')\n", (2674, 2747), False, 'from llama_index.core import SimpleDirectoryReader, load_index_from_storage, VectorStoreIndex, StorageContext\n'), ((1914, 2046), 'requests.get', 'requests.get', (['"""https://d18rn0p25nwr6d.cloudfront.net/CIK-0001045810/1cbe8fe7-e08a-46e3-8dcc-b429fc06c1a4.pdf"""'], {'stream': '(True)'}), "(\n 
'https://d18rn0p25nwr6d.cloudfront.net/CIK-0001045810/1cbe8fe7-e08a-46e3-8dcc-b429fc06c1a4.pdf'\n , stream=True)\n", (1926, 2046), False, 'import requests\n'), ((3579, 3616), 'chainlit.user_session.get', 'cl.user_session.get', (['"""wandb_callback"""'], {}), "('wandb_callback')\n", (3598, 3616), True, 'import chainlit as cl\n'), ((3703, 3740), 'chainlit.user_session.get', 'cl.user_session.get', (['"""wandb_callback"""'], {}), "('wandb_callback')\n", (3722, 3740), True, 'import chainlit as cl\n'), ((2187, 2227), 'os.path.dirname', 'os.path.dirname', (['"""nvidia_data/paper.pdf"""'], {}), "('nvidia_data/paper.pdf')\n", (2202, 2227), False, 'import os\n'), ((2420, 2457), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""nvidia_data/"""'], {}), "('nvidia_data/')\n", (2441, 2457), False, 'from llama_index.core import SimpleDirectoryReader, load_index_from_storage, VectorStoreIndex, StorageContext\n'), ((2584, 2625), 'llama_index.vector_stores.faiss.FaissVectorStore', 'FaissVectorStore', ([], {'faiss_index': 'faiss_index'}), '(faiss_index=faiss_index)\n', (2600, 2625), False, 'from llama_index.vector_stores.faiss import FaissVectorStore\n')] |
"""Response builder class.
This class provides general functions for taking in a set of text
and generating a response.
Supports different modes: 1) stuffing chunks into the prompt,
2) creating and refining separately over each chunk, 3) tree summarization.
"""
import logging
from abc import abstractmethod
from typing import Any, Dict, Generator, List, Optional, Sequence, AsyncGenerator
from llama_index.core.base.query_pipeline.query import (
ChainableMixin,
InputKeys,
OutputKeys,
QueryComponent,
validate_and_convert_stringable,
)
from llama_index.core.base.response.schema import (
RESPONSE_TYPE,
PydanticResponse,
Response,
StreamingResponse,
AsyncStreamingResponse,
)
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.prompt_helper import PromptHelper
from llama_index.core.prompts.mixin import PromptMixin
from llama_index.core.schema import (
BaseNode,
MetadataMode,
NodeWithScore,
QueryBundle,
QueryType,
)
from llama_index.core.service_context import ServiceContext
from llama_index.core.service_context_elements.llm_predictor import LLMPredictorType
from llama_index.core.settings import (
Settings,
callback_manager_from_settings_or_context,
llm_from_settings_or_context,
)
from llama_index.core.types import RESPONSE_TEXT_TYPE
from llama_index.core.instrumentation.events.synthesis import (
SynthesizeStartEvent,
SynthesizeEndEvent,
)
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
logger = logging.getLogger(__name__)
QueryTextType = QueryType
def empty_response_generator() -> Generator[str, None, None]:
yield "Empty Response"
async def empty_response_agenerator() -> AsyncGenerator[str, None]:
yield "Empty Response"
class BaseSynthesizer(ChainableMixin, PromptMixin):
"""Response builder class."""
def __init__(
self,
llm: Optional[LLMPredictorType] = None,
callback_manager: Optional[CallbackManager] = None,
prompt_helper: Optional[PromptHelper] = None,
streaming: bool = False,
output_cls: BaseModel = None,
# deprecated
service_context: Optional[ServiceContext] = None,
) -> None:
"""Init params."""
self._llm = llm or llm_from_settings_or_context(Settings, service_context)
if callback_manager:
self._llm.callback_manager = callback_manager
self._callback_manager = (
callback_manager
or callback_manager_from_settings_or_context(Settings, service_context)
)
self._prompt_helper = (
prompt_helper
or Settings._prompt_helper
or PromptHelper.from_llm_metadata(
self._llm.metadata,
)
)
self._streaming = streaming
self._output_cls = output_cls
def _get_prompt_modules(self) -> Dict[str, Any]:
"""Get prompt modules."""
# TODO: keep this for now since response synthesizers don't generally have sub-modules
return {}
@property
def callback_manager(self) -> CallbackManager:
return self._callback_manager
@callback_manager.setter
def callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set callback manager."""
self._callback_manager = callback_manager
# TODO: please fix this later
self._callback_manager = callback_manager
self._llm.callback_manager = callback_manager
@abstractmethod
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Get response."""
...
@abstractmethod
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Get response."""
...
def _log_prompt_and_response(
self,
formatted_prompt: str,
response: RESPONSE_TEXT_TYPE,
log_prefix: str = "",
) -> None:
"""Log prompt and response from LLM."""
logger.debug(f"> {log_prefix} prompt template: {formatted_prompt}")
logger.debug(f"> {log_prefix} response: {response}")
def _get_metadata_for_response(
self,
nodes: List[BaseNode],
) -> Optional[Dict[str, Any]]:
"""Get metadata for response."""
return {node.node_id: node.metadata for node in nodes}
def _prepare_response_output(
self,
response_str: Optional[RESPONSE_TEXT_TYPE],
source_nodes: List[NodeWithScore],
) -> RESPONSE_TYPE:
"""Prepare response object from response string."""
response_metadata = self._get_metadata_for_response(
[node_with_score.node for node_with_score in source_nodes]
)
if isinstance(response_str, str):
return Response(
response_str,
source_nodes=source_nodes,
metadata=response_metadata,
)
if isinstance(response_str, Generator):
return StreamingResponse(
response_str,
source_nodes=source_nodes,
metadata=response_metadata,
)
if isinstance(response_str, AsyncGenerator):
return AsyncStreamingResponse(
response_str,
source_nodes=source_nodes,
metadata=response_metadata,
)
if isinstance(response_str, self._output_cls):
return PydanticResponse(
response_str, source_nodes=source_nodes, metadata=response_metadata
)
raise ValueError(
f"Response must be a string or a generator. Found {type(response_str)}"
)
@dispatcher.span
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> RESPONSE_TYPE:
dispatcher.event(SynthesizeStartEvent(query=query))
if len(nodes) == 0:
if self._streaming:
empty_response = StreamingResponse(
response_gen=empty_response_generator()
)
dispatcher.event(
SynthesizeEndEvent(query=query, response=str(empty_response))
)
return empty_response
else:
empty_response = Response("Empty Response")
dispatcher.event(
SynthesizeEndEvent(query=query, response=str(empty_response))
)
return empty_response
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
response_str = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = additional_source_nodes or []
source_nodes = list(nodes) + list(additional_source_nodes)
response = self._prepare_response_output(response_str, source_nodes)
event.on_end(payload={EventPayload.RESPONSE: response})
dispatcher.event(SynthesizeEndEvent(query=query, response=str(response)))
return response
@dispatcher.span
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> RESPONSE_TYPE:
dispatcher.event(SynthesizeStartEvent(query=query))
if len(nodes) == 0:
if self._streaming:
empty_response = AsyncStreamingResponse(
response_gen=empty_response_agenerator()
)
dispatcher.event(
SynthesizeEndEvent(query=query, response=str(empty_response))
)
return empty_response
else:
empty_response = Response("Empty Response")
dispatcher.event(
SynthesizeEndEvent(query=query, response=str(empty_response))
)
return empty_response
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
response_str = await self.aget_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = additional_source_nodes or []
source_nodes = list(nodes) + list(additional_source_nodes)
response = self._prepare_response_output(response_str, source_nodes)
event.on_end(payload={EventPayload.RESPONSE: response})
dispatcher.event(SynthesizeEndEvent(query=query, response=str(response)))
return response
def _as_query_component(self, **kwargs: Any) -> QueryComponent:
"""As query component."""
return SynthesizerComponent(synthesizer=self)
class SynthesizerComponent(QueryComponent):
"""Synthesizer component."""
synthesizer: BaseSynthesizer = Field(..., description="Synthesizer")
class Config:
arbitrary_types_allowed = True
def set_callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set callback manager."""
self.synthesizer.callback_manager = callback_manager
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
# make sure both query_str and nodes are there
if "query_str" not in input:
raise ValueError("Input must have key 'query_str'")
input["query_str"] = validate_and_convert_stringable(input["query_str"])
if "nodes" not in input:
raise ValueError("Input must have key 'nodes'")
nodes = input["nodes"]
if not isinstance(nodes, list):
raise ValueError("Input nodes must be a list")
for node in nodes:
if not isinstance(node, NodeWithScore):
raise ValueError("Input nodes must be a list of NodeWithScore")
return input
def _run_component(self, **kwargs: Any) -> Dict[str, Any]:
"""Run component."""
output = self.synthesizer.synthesize(kwargs["query_str"], kwargs["nodes"])
return {"output": output}
async def _arun_component(self, **kwargs: Any) -> Dict[str, Any]:
"""Run component."""
output = await self.synthesizer.asynthesize(
kwargs["query_str"], kwargs["nodes"]
)
return {"output": output}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
return InputKeys.from_keys({"query_str", "nodes"})
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
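# Hedged sketch (not part of the original module): a minimal concrete synthesizer
# that stuffs every chunk into a single prompt. The class name and prompt wording
# are illustrative assumptions, not llama-index APIs.
class _StuffAllChunksSynthesizer(BaseSynthesizer):
    """Toy synthesizer: concatenate all chunks and query the LLM once."""

    def get_response(
        self, query_str: str, text_chunks: Sequence[str], **response_kwargs: Any
    ) -> RESPONSE_TEXT_TYPE:
        context = "\n\n".join(text_chunks)
        return str(self._llm.complete(f"Context:\n{context}\n\nQuestion: {query_str}"))

    async def aget_response(
        self, query_str: str, text_chunks: Sequence[str], **response_kwargs: Any
    ) -> RESPONSE_TEXT_TYPE:
        context = "\n\n".join(text_chunks)
        completion = await self._llm.acomplete(
            f"Context:\n{context}\n\nQuestion: {query_str}"
        )
        return str(completion)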
| [
"llama_index.core.instrumentation.events.synthesis.SynthesizeStartEvent",
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.base.query_pipeline.query.InputKeys.from_keys",
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.base.response.schema.AsyncStreamingResponse",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.instrumentation.get_dispatcher",
"llama_index.core.base.query_pipeline.query.OutputKeys.from_keys",
"llama_index.core.base.response.schema.PydanticResponse",
"llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.core.base.response.schema.Response",
"llama_index.core.schema.QueryBundle",
"llama_index.core.base.response.schema.StreamingResponse",
"llama_index.core.base.query_pipeline.query.validate_and_convert_stringable"
] | [((1679, 1714), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (1704, 1714), True, 'import llama_index.core.instrumentation as instrument\n'), ((1725, 1752), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1742, 1752), False, 'import logging\n'), ((9986, 10023), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Synthesizer"""'}), "(..., description='Synthesizer')\n", (9991, 10023), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((10590, 10641), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['query_str']"], {}), "(input['query_str'])\n", (10621, 10641), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((11597, 11640), 'llama_index.core.base.query_pipeline.query.InputKeys.from_keys', 'InputKeys.from_keys', (["{'query_str', 'nodes'}"], {}), "({'query_str', 'nodes'})\n", (11616, 11640), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((11739, 11771), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (11759, 11771), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((2470, 2525), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (2498, 2525), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((2694, 2762), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (2735, 2762), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((2886, 2936), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', (['self._llm.metadata'], {}), '(self._llm.metadata)\n', (2916, 2936), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((5135, 5212), 'llama_index.core.base.response.schema.Response', 'Response', (['response_str'], {'source_nodes': 'source_nodes', 'metadata': 'response_metadata'}), '(response_str, source_nodes=source_nodes, metadata=response_metadata)\n', (5143, 5212), False, 'from llama_index.core.base.response.schema import RESPONSE_TYPE, PydanticResponse, Response, StreamingResponse, AsyncStreamingResponse\n'), ((5343, 5434), 'llama_index.core.base.response.schema.StreamingResponse', 'StreamingResponse', (['response_str'], {'source_nodes': 'source_nodes', 'metadata': 'response_metadata'}), '(response_str, source_nodes=source_nodes, metadata=\n response_metadata)\n', (5360, 5434), False, 'from llama_index.core.base.response.schema import RESPONSE_TYPE, PydanticResponse, Response, StreamingResponse, AsyncStreamingResponse\n'), ((5565, 5661), 'llama_index.core.base.response.schema.AsyncStreamingResponse', 'AsyncStreamingResponse', (['response_str'], {'source_nodes': 'source_nodes', 'metadata': 
'response_metadata'}), '(response_str, source_nodes=source_nodes, metadata=\n response_metadata)\n', (5587, 5661), False, 'from llama_index.core.base.response.schema import RESPONSE_TYPE, PydanticResponse, Response, StreamingResponse, AsyncStreamingResponse\n'), ((5794, 5884), 'llama_index.core.base.response.schema.PydanticResponse', 'PydanticResponse', (['response_str'], {'source_nodes': 'source_nodes', 'metadata': 'response_metadata'}), '(response_str, source_nodes=source_nodes, metadata=\n response_metadata)\n', (5810, 5884), False, 'from llama_index.core.base.response.schema import RESPONSE_TYPE, PydanticResponse, Response, StreamingResponse, AsyncStreamingResponse\n'), ((6309, 6342), 'llama_index.core.instrumentation.events.synthesis.SynthesizeStartEvent', 'SynthesizeStartEvent', ([], {'query': 'query'}), '(query=query)\n', (6329, 6342), False, 'from llama_index.core.instrumentation.events.synthesis import SynthesizeStartEvent, SynthesizeEndEvent\n'), ((7013, 7041), 'llama_index.core.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (7024, 7041), False, 'from llama_index.core.schema import BaseNode, MetadataMode, NodeWithScore, QueryBundle, QueryType\n'), ((8148, 8181), 'llama_index.core.instrumentation.events.synthesis.SynthesizeStartEvent', 'SynthesizeStartEvent', ([], {'query': 'query'}), '(query=query)\n', (8168, 8181), False, 'from llama_index.core.instrumentation.events.synthesis import SynthesizeStartEvent, SynthesizeEndEvent\n'), ((8857, 8885), 'llama_index.core.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (8868, 8885), False, 'from llama_index.core.schema import BaseNode, MetadataMode, NodeWithScore, QueryBundle, QueryType\n'), ((6758, 6784), 'llama_index.core.base.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6766, 6784), False, 'from llama_index.core.base.response.schema import RESPONSE_TYPE, PydanticResponse, Response, StreamingResponse, AsyncStreamingResponse\n'), ((8602, 8628), 'llama_index.core.base.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (8610, 8628), False, 'from llama_index.core.base.response.schema import RESPONSE_TYPE, PydanticResponse, Response, StreamingResponse, AsyncStreamingResponse\n')] |
"""
This script is used to summarize conversations from Zendesk support tickets.
It reads text files containing comments from the ticket and generates a summary
that includes information about the participants, problems raised, key events,
current status of the ticket, and log lines from the messages.
The script uses the `Anthropic` LLM integration from the `llama_index` package to generate the summary.
The summary is saved in a text file for each ticket.
Usage:
- Modify the `MODEL` variable to specify the desired model for summarization.
- Run the script to generate summaries for the tickets.
Note: This script requires the `llama_index` package to be installed.
"""
import os
import glob
import time
import llama_index
from llama_index.core import ServiceContext
from llama_index.llms.anthropic import Anthropic
from llama_index.core import SimpleDirectoryReader
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.evaluation import FaithfulnessEvaluator
# MODEL = "claude-3-opus-20240229"
# MODEL = "claude-3-sonnet-20240229"
MODEL = "claude-3-haiku-20240307"
DATA_DIR = "data"
SUMMARY_ROOT = "structured.summaries"
SUMMARY_DIR = os.path.join(SUMMARY_ROOT, MODEL).replace(":", "_")
os.makedirs(SUMMARY_DIR, exist_ok=True)
def saveText(path, text):
"Save the given text to a file at the specified path."
with open(path, "w") as f:
f.write(text)
def commentPaths(ticketNumber):
"Returns a sorted list of file paths for the comments in Zendesk ticket `ticketNumber`."
ticketDir = os.path.join(DATA_DIR, ticketNumber)
return sorted(glob.glob(os.path.join(ticketDir, "*.txt")))
def summaryPath(ticketNumber):
"Returns the file path for where we store the summary of Zendesk ticket `ticketNumber`."
return os.path.join(SUMMARY_DIR, f"{ticketNumber}.txt")
def totalSizeKB(paths):
"Returns the total size in kilobytes of the files specified by `paths`."
return sum(os.path.getsize(path) for path in paths) / 1024
def currentTime():
"Returns the current time in the format 'dd/mm/YYYY HH:MM:SS'."
from datetime import datetime
now = datetime.now()
return now.strftime("%d/%m/%Y %H:%M:%S")
llm = Anthropic(model=MODEL, max_tokens=1024)
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
summarizer = TreeSummarize(service_context=service_context, verbose=False)
# evaluator = FaithfulnessEvaluator(llm=llm)
COMPANY = "PaperCut"
BASE_PROMPT = f"The following text is a series of messages from a {COMPANY} support ticket."
def makePrompt(text):
return f"{BASE_PROMPT}\n{text}"
QUESTION_DETAIL = [
("Summary", "Summarise the whole conversation in one sentence."),
("Problems", """List the problems raised in the ticket.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Problems are issues that need to be resolved, such as a bug, a feature request.
Questions about how to use the product are not problems.
Responses to problems are not problems.
Each problem should be a single sentence describing the problem.
When there is no problem, don't write a line.
If there are multiple problems, order them by importance, most important first."""),
("Status", """What is the current status of the ticket?
Is it open, closed, or pending?
If it is closed, what was the resolution?
If it is pending, what is the next action?
If it is open, what is the current problem?
Do not include any other information in this answer.
Your answer should be one sentence for status and optionally one sentence for the resolution or next action.
"""),
("Participants", """List the participants and who they work for.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Use the format: 'Name: Company.'
List the customer first and {COMPANY} staff last.
"""),
("Events", """List the key events and the date they occurred.
An event is something that happens, such as a problem being reported, a solution being proposed, or a resolution being reached.
Don't include contacts, responses, or other non-events.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Questions about how to use the product are not events.
Responses to problems are not events.
Log lines are not events.
When there is no event, don't write a line.
Use the format: 'Date: Event.'
Format the date as 'YYYY-MM-DD'.
Order the list by date, earliest first."""),
("Logs", """List all the log lines from the messages.
Use a numbered list.
Order the list by date, earliest first.
Don't add a prologue or epilogue to the list.
When there is no log line, don't write a line.
Write the full log line.
Log lines are lines that start with a date and a status such as INFO, WARN, DEBUG or ERROR.
Example: 2022-01-27 13:31:43,628 WARN
Example: 2022-01-26 12:40:18,380 DEBUG ClientManagerImpl
Example: ERROR | wrapper | 2022/01/27 13:30:58 | JVM exited unexpectedly. """),
]
QUESTIONS = [question for question, _ in QUESTION_DETAIL]
QUESTION_PROMPT = {short: makePrompt(detail) for (short, detail) in QUESTION_DETAIL}
def makeAnswer(question, answer):
question = f"{question.upper()}:"
return f"{question:13} -------------------------------------------------------------*\n{answer}"
def summariseTicket(ticketNumber):
"""Summarizes the ticket `ticketNumber` by generating answers to a set of predefined questions.
Returns: Structured text containing the answers to each of the questions based on the
comments in the ticket.
"""
t0 = time.time()
input_files = commentPaths(ticketNumber)
reader = SimpleDirectoryReader(input_files=input_files)
docs = reader.load_data()
texts = [doc.text for doc in docs]
print(f"Loaded {len(texts)} comments in {time.time() - t0:.2f} seconds")
questionAnswer = {}
for question in reversed(QUESTIONS):
t0 = time.time()
prompt = QUESTION_PROMPT[question]
answer = summarizer.get_response(prompt, texts)
questionAnswer[question] = answer.strip()
print(f"{time.time() - t0:5.2f} seconds to answer {question}")
return "\n\n".join(makeAnswer(question, questionAnswer[question]) for question in QUESTIONS)
#
# Test case.
#
# ['1259693', '1216136', '1196141', '1260221', '1116722', '1280919']
# 0: 1259693 7 comments 2.888 kb
# 1: 1216136 26 comments 20.715 kb
# 2: 1196141 122 comments 81.527 kb
# 3: 1260221 106 comments 126.619 kb
# 4: 1116722 288 comments 190.168 kb
# 5: 1280919 216 comments 731.220 kb
MAX_SIZE = 100 # Maximum size of ticket comments in kilobytes.
if __name__ == "__main__":
import time
print(f"MODEL={MODEL}")
ticketNumbers = sorted(os.path.basename(path) for path in glob.glob(os.path.join(DATA_DIR, "*")))
ticketNumbers.sort(key=lambda k: (totalSizeKB(commentPaths(k)), k))
# ticketNumbers = ticketNumbers[:2]
ticketNumbers = [k for k in ticketNumbers if totalSizeKB(commentPaths(k)) < MAX_SIZE]
print(ticketNumbers)
for i, ticketNumber in enumerate(ticketNumbers):
paths = commentPaths(ticketNumber)
print(f"{i:4}: {ticketNumber:8} {len(paths):3} comments {totalSizeKB(paths):7.3f} kb")
# ticketNumbers = ticketNumbers[:1]
t00 = time.time()
summaries = {}
durations = {}
commentCounts = {}
commentSizes = {}
for i, ticketNumber in enumerate(ticketNumbers):
commentCount = len(commentPaths(ticketNumber))
commentSize = totalSizeKB(commentPaths(ticketNumber))
print(f"{i:2}: ticketNumber={ticketNumber:8} {commentCount:3} comments {commentSize:7.3f} kb {currentTime()}",
flush=True)
if os.path.exists(summaryPath(ticketNumber)):
print(f" skipping ticket {ticketNumber}", flush=True)
continue # Skip tickets that have already been summarised.
t0 = time.time()
summary = summariseTicket(ticketNumber)
duration = time.time() - t0
description = f"{commentCount} comments {commentSize:7.3f} kb {duration:5.2f} sec summary={len(summary)}"
print(f" {description}", flush=True)
with open(summaryPath(ticketNumber), "w") as f:
print(f"Summary: ticket {ticketNumber}: {description} -------------------------", file=f)
print(summary, file=f)
summaries[ticketNumber] = summary
durations[ticketNumber] = duration
commentCounts[ticketNumber] = commentCount
commentSizes[ticketNumber] = commentSize
duration = time.time() - t00
print("====================^^^====================")
print(f"Duration: {duration:.2f} seconds")
for i, ticketNumber in enumerate(ticketNumbers):
        if ticketNumber not in durations:
            continue  # Skipped above: already summarised in an earlier run.
commentCount = commentCounts[ticketNumber]
commentSize = totalSizeKB(commentPaths(ticketNumber))
duration = durations[ticketNumber]
print(f"{i:2}: {ticketNumber:8}: {commentCount:3} comments {commentSize:7.3f} kb {duration:5.2f} seconds")
| [
"llama_index.llms.anthropic.Anthropic",
"llama_index.core.response_synthesizers.TreeSummarize",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.ServiceContext.from_defaults"
] | [((1211, 1250), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIR'], {'exist_ok': '(True)'}), '(SUMMARY_DIR, exist_ok=True)\n', (1222, 1250), False, 'import os\n'), ((2181, 2220), 'llama_index.llms.anthropic.Anthropic', 'Anthropic', ([], {'model': 'MODEL', 'max_tokens': '(1024)'}), '(model=MODEL, max_tokens=1024)\n', (2190, 2220), False, 'from llama_index.llms.anthropic import Anthropic\n'), ((2239, 2297), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (2267, 2297), False, 'from llama_index.core import ServiceContext\n'), ((2311, 2372), 'llama_index.core.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context', 'verbose': '(False)'}), '(service_context=service_context, verbose=False)\n', (2324, 2372), False, 'from llama_index.core.response_synthesizers import TreeSummarize\n'), ((1532, 1568), 'os.path.join', 'os.path.join', (['DATA_DIR', 'ticketNumber'], {}), '(DATA_DIR, ticketNumber)\n', (1544, 1568), False, 'import os\n'), ((1768, 1816), 'os.path.join', 'os.path.join', (['SUMMARY_DIR', 'f"""{ticketNumber}.txt"""'], {}), "(SUMMARY_DIR, f'{ticketNumber}.txt')\n", (1780, 1816), False, 'import os\n'), ((2114, 2128), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2126, 2128), False, 'from datetime import datetime\n'), ((5498, 5509), 'time.time', 'time.time', ([], {}), '()\n', (5507, 5509), False, 'import time\n'), ((5568, 5614), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'input_files'}), '(input_files=input_files)\n', (5589, 5614), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((7210, 7221), 'time.time', 'time.time', ([], {}), '()\n', (7219, 7221), False, 'import time\n'), ((1158, 1191), 'os.path.join', 'os.path.join', (['SUMMARY_ROOT', 'MODEL'], {}), '(SUMMARY_ROOT, MODEL)\n', (1170, 1191), False, 'import os\n'), ((5839, 5850), 'time.time', 'time.time', ([], {}), '()\n', (5848, 5850), False, 'import time\n'), ((7833, 7844), 'time.time', 'time.time', ([], {}), '()\n', (7842, 7844), False, 'import time\n'), ((8486, 8497), 'time.time', 'time.time', ([], {}), '()\n', (8495, 8497), False, 'import time\n'), ((1597, 1629), 'os.path.join', 'os.path.join', (['ticketDir', '"""*.txt"""'], {}), "(ticketDir, '*.txt')\n", (1609, 1629), False, 'import os\n'), ((6666, 6688), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (6682, 6688), False, 'import os\n'), ((7912, 7923), 'time.time', 'time.time', ([], {}), '()\n', (7921, 7923), False, 'import time\n'), ((1934, 1955), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (1949, 1955), False, 'import os\n'), ((5729, 5740), 'time.time', 'time.time', ([], {}), '()\n', (5738, 5740), False, 'import time\n'), ((6711, 6738), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""*"""'], {}), "(DATA_DIR, '*')\n", (6723, 6738), False, 'import os\n'), ((6017, 6028), 'time.time', 'time.time', ([], {}), '()\n', (6026, 6028), False, 'import time\n')] |
"""Google Generative AI Vector Store.
The GenAI Semantic Retriever API is a managed end-to-end service that allows
developers to create a corpus of documents to perform semantic search on
related passages given a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast
from llama_index.core.bridge.pydantic import ( # type: ignore
BaseModel,
Field,
PrivateAttr,
)
from llama_index.core.schema import BaseNode, RelatedNodeInfo, TextNode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryResult,
)
if TYPE_CHECKING:
from google.auth import credentials
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_default_doc_id = "default-doc"
"""Google GenerativeAI service context.
Use this to provide the correct service context for `GoogleVectorStore`.
See the docstring for `GoogleVectorStore` for usage example.
"""
def set_google_config(
*,
api_endpoint: Optional[str] = None,
user_agent: Optional[str] = None,
page_size: Optional[int] = None,
auth_credentials: Optional["credentials.Credentials"] = None,
**kwargs: Any,
) -> None:
"""
Set the configuration for Google Generative AI API.
Parameters are optional, Normally, the defaults should work fine.
If provided, they will override the default values in the Config class.
See the docstring in `genai_extension.py` for more details.
auth_credentials: Optional["credentials.Credentials"] = None,
Use this to pass Google Auth credentials such as using a service account.
        Refer to the following for auth credentials documentation:
https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount.
Example:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
"/path/to/service.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
config_attrs = {
"api_endpoint": api_endpoint,
"user_agent": user_agent,
"page_size": page_size,
"auth_credentials": auth_credentials,
"testing": kwargs.get("testing", None),
}
attrs = {k: v for k, v in config_attrs.items() if v is not None}
config = genaix.Config(**attrs)
genaix.set_config(config)
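# Minimal call sketch (values below are illustrative assumptions; every parameter
# is optional and the defaults normally suffice):
# set_google_config(page_size=20, user_agent="my-app/0.1")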
class NoSuchCorpusException(Exception):
def __init__(self, *, corpus_id: str) -> None:
super().__init__(f"No such corpus {corpus_id} found")
class GoogleVectorStore(BasePydanticVectorStore):
"""Google GenerativeAI Vector Store.
Currently, it computes the embedding vectors on the server side.
Example:
google_vector_store = GoogleVectorStore.from_corpus(
corpus_id="my-corpus-id",
include_metadata=True,
metadata_keys=['file_name', 'creation_date']
)
index = VectorStoreIndex.from_vector_store(
vector_store=google_vector_store
)
Attributes:
corpus_id: The corpus ID that this vector store instance will read and
write to.
include_metadata (bool): Indicates whether to include custom metadata in the query
results. Defaults to False.
metadata_keys (Optional[List[str]]): Specifies which metadata keys to include in the
query results if include_metadata is set to True. If None, all metadata keys
are included. Defaults to None.
"""
# Semantic Retriever stores the document node's text as string and embeds
# the vectors on the server automatically.
stores_text: bool = True
is_embedding_query: bool = False
# This is not the Google's corpus name but an ID generated in the LlamaIndex
# world.
corpus_id: str = Field(frozen=True)
"""Corpus ID that this instance of the vector store is using."""
# Configuration options for handling metadata in query results
include_metadata: bool = False
metadata_keys: Optional[List[str]] = None
_client: Any = PrivateAttr()
def __init__(self, *, client: Any, **kwargs: Any):
"""Raw constructor.
Use the class method `from_corpus` or `create_corpus` instead.
Args:
client: The low-level retriever class from google.ai.generativelanguage.
"""
try:
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(**kwargs)
assert isinstance(client, genai.RetrieverServiceClient)
self._client = client
@classmethod
def from_corpus(
cls,
*,
corpus_id: str,
include_metadata: bool = False,
metadata_keys: Optional[List[str]] = None,
) -> "GoogleVectorStore":
"""Create an instance that points to an existing corpus.
Args:
corpus_id (str): ID of an existing corpus on Google's server.
include_metadata (bool, optional): Specifies whether to include custom metadata in the
query results. Defaults to False, meaning metadata will not be included.
metadata_keys (Optional[List[str]], optional): Specifies which metadata keys to include
in the query results if include_metadata is set to True. If None, all metadata keys
are included. Defaults to None.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
NoSuchCorpusException if no such corpus is found.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.from_corpus(corpus_id={corpus_id})")
client = genaix.build_semantic_retriever()
if genaix.get_corpus(corpus_id=corpus_id, client=client) is None:
raise NoSuchCorpusException(corpus_id=corpus_id)
return cls(
corpus_id=corpus_id,
client=client,
include_metadata=include_metadata,
metadata_keys=metadata_keys,
)
@classmethod
def create_corpus(
cls, *, corpus_id: Optional[str] = None, display_name: Optional[str] = None
) -> "GoogleVectorStore":
"""Create an instance that points to a newly created corpus.
Examples:
store = GoogleVectorStore.create_corpus()
print(f"Created corpus with ID: {store.corpus_id})
store = GoogleVectorStore.create_corpus(
display_name="My first corpus"
)
store = GoogleVectorStore.create_corpus(
corpus_id="my-corpus-1",
display_name="My first corpus"
)
Args:
corpus_id: ID of the new corpus to be created. If not provided,
Google server will provide one for you.
display_name: Title of the corpus. If not provided, Google server
will provide one for you.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
An exception if the corpus already exists or the user hits the
quota limit.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(
f"\n\nGoogleVectorStore.create_corpus(new_corpus_id={corpus_id}, new_display_name={display_name})"
)
client = genaix.build_semantic_retriever()
new_corpus_id = corpus_id or str(uuid.uuid4())
new_corpus = genaix.create_corpus(
corpus_id=new_corpus_id, display_name=display_name, client=client
)
name = genaix.EntityName.from_str(new_corpus.name)
return cls(corpus_id=name.corpus_id, client=client)
@classmethod
def class_name(cls) -> str:
return "GoogleVectorStore"
@property
def client(self) -> Any:
return self._client
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""Add nodes with embedding to vector store.
If a node has a source node, the source node's ID will be used to create
a document. Otherwise, a default document for that corpus will be used
to house the node.
Furthermore, if the source node has a metadata field "file_name", it
will be used as the title of the document. If the source node has no
such field, Google server will assign a title to the document.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.add([
TextNode(
text="Hello, my darling",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
TextNode(
text="Goodbye, my baby",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
])
The above code will create one document with ID `doc-456` and title
`Title for doc-456`. This document will house both nodes.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.add(nodes={nodes})")
client = cast(genai.RetrieverServiceClient, self.client)
created_node_ids: List[str] = []
for nodeGroup in _group_nodes_by_source(nodes):
source = nodeGroup.source_node
document_id = source.node_id
document = genaix.get_document(
corpus_id=self.corpus_id, document_id=document_id, client=client
)
if not document:
genaix.create_document(
corpus_id=self.corpus_id,
display_name=source.metadata.get("file_name", None),
document_id=document_id,
metadata=source.metadata,
client=client,
)
created_chunks = genaix.batch_create_chunk(
corpus_id=self.corpus_id,
document_id=document_id,
texts=[node.get_content() for node in nodeGroup.nodes],
metadatas=[node.metadata for node in nodeGroup.nodes],
client=client,
)
created_node_ids.extend([chunk.name for chunk in created_chunks])
return created_node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes by ref_doc_id.
Both the underlying nodes and the document will be deleted from Google
server.
Args:
ref_doc_id: The document ID to be deleted.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.delete(ref_doc_id={ref_doc_id})")
client = cast(genai.RetrieverServiceClient, self.client)
genaix.delete_document(
corpus_id=self.corpus_id, document_id=ref_doc_id, client=client
)
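    # A minimal usage sketch for `delete` (not part of the original module; the
    # corpus and document IDs below are made-up placeholders):
    #
    #   store = GoogleVectorStore.from_corpus(corpus_id="my-corpus-id")
    #   store.delete(ref_doc_id="doc-456")
    #
    # This removes both the chunks and the owning document from Google's server.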
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.query(
query=VectorStoreQuery(
query_str="What is the meaning of life?",
# Only nodes with this author.
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="author",
value="Arthur Schopenhauer",
)
]
),
# Only from these docs. If not provided,
# the entire corpus is searched.
doc_ids=["doc-456"],
similarity_top_k=3,
)
)
Args:
query: See `llama_index.core.vector_stores.types.VectorStoreQuery`.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.query(query={query})")
query_str = query.query_str
if query_str is None:
raise ValueError("VectorStoreQuery.query_str should not be None.")
client = cast(genai.RetrieverServiceClient, self.client)
relevant_chunks: List[genai.RelevantChunk] = []
if query.doc_ids is None:
# The chunks from query_corpus should be sorted in reverse order by
            # relevance score.
relevant_chunks = genaix.query_corpus(
corpus_id=self.corpus_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
else:
for doc_id in query.doc_ids:
relevant_chunks.extend(
genaix.query_document(
corpus_id=self.corpus_id,
document_id=doc_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
)
        # Make sure the chunks are sorted in descending order of relevance
        # score, even across multiple documents.
relevant_chunks.sort(key=lambda c: c.chunk_relevance_score, reverse=True)
nodes = []
include_metadata = self.include_metadata
metadata_keys = self.metadata_keys
for chunk in relevant_chunks:
metadata = {}
if include_metadata:
for custom_metadata in chunk.chunk.custom_metadata:
# Use getattr to safely extract values
value = getattr(custom_metadata, "string_value", None)
if (
value is None
): # If string_value is not set, check for numeric_value
value = getattr(custom_metadata, "numeric_value", None)
# Add to the metadata dictionary only those keys that are present in metadata_keys
if value is not None and (
metadata_keys is None or custom_metadata.key in metadata_keys
):
metadata[custom_metadata.key] = value
text_node = TextNode(
text=chunk.chunk.data.string_value,
                id_=_extract_chunk_id(chunk.chunk.name),
metadata=metadata, # Adding metadata to the node
)
nodes.append(text_node)
return VectorStoreQueryResult(
nodes=nodes,
ids=[_extract_chunk_id(chunk.chunk.name) for chunk in relevant_chunks],
similarities=[chunk.chunk_relevance_score for chunk in relevant_chunks],
)
def _extract_chunk_id(entity_name: str) -> str:
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
id = genaix.EntityName.from_str(entity_name).chunk_id
assert id is not None
return id
class _NodeGroup(BaseModel):
"""Every node in nodes have the same source node."""
source_node: RelatedNodeInfo
nodes: List[BaseNode]
def _group_nodes_by_source(nodes: Sequence[BaseNode]) -> List[_NodeGroup]:
"""Returns a list of lists of nodes where each list has all the nodes
from the same document.
"""
groups: Dict[str, _NodeGroup] = {}
for node in nodes:
source_node: RelatedNodeInfo
if isinstance(node.source_node, RelatedNodeInfo):
source_node = node.source_node
else:
source_node = RelatedNodeInfo(node_id=_default_doc_id)
if source_node.node_id not in groups:
groups[source_node.node_id] = _NodeGroup(source_node=source_node, nodes=[])
groups[source_node.node_id].nodes.append(node)
return list(groups.values())
def _convert_filter(fs: Optional[MetadataFilters]) -> Dict[str, Any]:
if fs is None:
return {}
assert isinstance(fs, MetadataFilters)
return {f.key: f.value for f in fs.filters}
| [
"llama_index.vector_stores.google.genai_extension.delete_document",
"llama_index.vector_stores.google.genai_extension.Config",
"llama_index.vector_stores.google.genai_extension.get_corpus",
"llama_index.vector_stores.google.genai_extension.EntityName.from_str",
"llama_index.vector_stores.google.genai_extension.create_corpus",
"llama_index.vector_stores.google.genai_extension.build_semantic_retriever",
"llama_index.vector_stores.google.genai_extension.get_document",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.bridge.pydantic.PrivateAttr",
"llama_index.vector_stores.google.genai_extension.set_config",
"llama_index.core.schema.RelatedNodeInfo"
] | [((812, 839), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (829, 839), False, 'import logging\n'), ((2859, 2881), 'llama_index.vector_stores.google.genai_extension.Config', 'genaix.Config', ([], {}), '(**attrs)\n', (2872, 2881), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((2886, 2911), 'llama_index.vector_stores.google.genai_extension.set_config', 'genaix.set_config', (['config'], {}), '(config)\n', (2903, 2911), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((4343, 4361), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4348, 4361), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((4600, 4613), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (4611, 4613), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((6413, 6446), 'llama_index.vector_stores.google.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (6444, 6446), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8216, 8249), 'llama_index.vector_stores.google.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (8247, 8249), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8326, 8417), 'llama_index.vector_stores.google.genai_extension.create_corpus', 'genaix.create_corpus', ([], {'corpus_id': 'new_corpus_id', 'display_name': 'display_name', 'client': 'client'}), '(corpus_id=new_corpus_id, display_name=display_name,\n client=client)\n', (8346, 8417), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8451, 8494), 'llama_index.vector_stores.google.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['new_corpus.name'], {}), '(new_corpus.name)\n', (8477, 8494), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((10566, 10613), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (10570, 10613), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((12317, 12364), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (12321, 12364), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((12373, 12464), 'llama_index.vector_stores.google.genai_extension.delete_document', 'genaix.delete_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'ref_doc_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=ref_doc_id,\n client=client)\n', (12395, 12464), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((13968, 14015), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (13972, 14015), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((16832, 16871), 'llama_index.vector_stores.google.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['entity_name'], {}), '(entity_name)\n', (16858, 16871), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((6458, 6511), 'llama_index.vector_stores.google.genai_extension.get_corpus', 'genaix.get_corpus', ([], 
{'corpus_id': 'corpus_id', 'client': 'client'}), '(corpus_id=corpus_id, client=client)\n', (6475, 6511), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((10819, 10908), 'llama_index.vector_stores.google.genai_extension.get_document', 'genaix.get_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'document_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=document_id,\n client=client)\n', (10838, 10908), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((17496, 17536), 'llama_index.core.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '_default_doc_id'}), '(node_id=_default_doc_id)\n', (17511, 17536), False, 'from llama_index.core.schema import BaseNode, RelatedNodeInfo, TextNode\n'), ((8291, 8303), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8301, 8303), False, 'import uuid\n')] |
from typing import Any
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
from llama_index.core.callbacks.simple_llm_handler import SimpleLLMHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index.core
llama_index.core.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
try:
from llama_index.callbacks.wandb import (
WandbCallbackHandler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"WandbCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-wandb`"
)
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
try:
from llama_index.callbacks.openinference import (
OpenInferenceCallbackHandler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"OpenInferenceCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-openinference`"
)
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
try:
from llama_index.callbacks.arize_phoenix import (
arize_phoenix_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"ArizePhoenixCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-arize-phoenix`"
)
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
try:
from llama_index.callbacks.honeyhive import (
honeyhive_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"HoneyHiveCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-honeyhive`"
)
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
try:
from llama_index.callbacks.promptlayer import (
PromptLayerHandler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"PromptLayerHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-promptlayer`"
)
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
try:
from llama_index.callbacks.deepeval import (
deepeval_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"DeepEvalCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-deepeval`"
)
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
elif eval_mode == "argilla":
try:
from llama_index.callbacks.argilla import (
argilla_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"ArgillaCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-argilla`"
)
handler = argilla_callback_handler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
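# A minimal usage sketch (hedged; "my-project" is a made-up value and the wandb
# example assumes `llama-index-callbacks-wandb` is installed — eval_params are
# forwarded verbatim to the chosen handler's constructor):
#
#   set_global_handler("simple")                                     # no extra deps
#   set_global_handler("wandb", run_args={"project": "my-project"})  # wandb handler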
| [
"llama_index.callbacks.openinference.OpenInferenceCallbackHandler",
"llama_index.callbacks.promptlayer.PromptLayerHandler",
"llama_index.callbacks.deepeval.deepeval_callback_handler",
"llama_index.callbacks.arize_phoenix.arize_phoenix_callback_handler",
"llama_index.callbacks.wandb.WandbCallbackHandler",
"llama_index.callbacks.honeyhive.honeyhive_callback_handler",
"llama_index.core.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.argilla.argilla_callback_handler"
] | [((941, 976), 'llama_index.callbacks.wandb.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (961, 976), False, 'from llama_index.callbacks.wandb import WandbCallbackHandler\n'), ((1424, 1467), 'llama_index.callbacks.openinference.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1452, 1467), False, 'from llama_index.callbacks.openinference import OpenInferenceCallbackHandler\n'), ((1916, 1961), 'llama_index.callbacks.arize_phoenix.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1946, 1961), False, 'from llama_index.callbacks.arize_phoenix import arize_phoenix_callback_handler\n'), ((2390, 2431), 'llama_index.callbacks.honeyhive.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (2416, 2431), False, 'from llama_index.callbacks.honeyhive import honeyhive_callback_handler\n'), ((2852, 2885), 'llama_index.callbacks.promptlayer.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (2870, 2885), False, 'from llama_index.callbacks.promptlayer import PromptLayerHandler\n'), ((3309, 3349), 'llama_index.callbacks.deepeval.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (3334, 3349), False, 'from llama_index.callbacks.deepeval import deepeval_callback_handler\n'), ((3400, 3431), 'llama_index.core.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (3416, 3431), False, 'from llama_index.core.callbacks.simple_llm_handler import SimpleLLMHandler\n'), ((3850, 3889), 'llama_index.callbacks.argilla.argilla_callback_handler', 'argilla_callback_handler', ([], {}), '(**eval_params)\n', (3874, 3889), False, 'from llama_index.callbacks.argilla import argilla_callback_handler\n')] |
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end-to-end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.bridge.pydantic import BaseModel # type: ignore
from llama_index.callbacks.schema import CBEventType, EventPayload
from llama_index.core.response.schema import Response
from llama_index.indices.query.schema import QueryBundle
from llama_index.prompts.mixin import PromptDictType
from llama_index.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.types import RESPONSE_TEXT_TYPE
from llama_index.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
            responder = GoogleTextSynthesizer.from_defaults(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
            the grounded response. These passages will always have no score;
            this is the only way to mark them as attributed passages. Then, the list
will follow with the originally provided passages, which will have
a score from the retrieval.
            Response's `metadata` may also have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
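# A minimal end-to-end sketch (not part of the original module; the question and
# passage text are made up):
#
#   synthesizer = GoogleTextSynthesizer.from_defaults(temperature=0.7)
#   result = synthesizer.get_response(
#       query_str="What does AQA stand for?",
#       text_chunks=["AQA stands for Attributed Question and Answering."],
#   )
#   print(result.answer, result.attributed_passages, result.answerable_probability)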
| [
"llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service",
"llama_index.core.response.schema.Response",
"llama_index.schema.TextNode",
"llama_index.indices.query.schema.QueryBundle"
] | [((1051, 1078), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1068, 1078), False, 'import logging\n'), ((2739, 2772), 'llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2770, 2772), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4824, 4873), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4828, 4873), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6767, 6793), 'llama_index.core.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6775, 6793), False, 'from llama_index.core.response.schema import Response\n'), ((6850, 6878), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6861, 6878), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((8289, 8311), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8297, 8311), False, 'from llama_index.schema import MetadataMode, NodeWithScore, TextNode\n')] |
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.bridge.pydantic import PrivateAttr
from llama_index.schema import BaseNode, MetadataMode, TextNode
from llama_index.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
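# Illustrative calls (hedged; the URL, cloud ID and API key below are placeholders
# for a deployment you would have to provide yourself):
#
#   client = _get_elasticsearch_client(es_url="http://localhost:9200")
#   client = _get_elasticsearch_client(cloud_id="<deployment-cloud-id>", api_key="<key>")
#
# Exactly one of es_url / cloud_id must be given; api_key takes precedence over
# username/password when both are supplied.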
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
class ElasticsearchStore(BasePydanticVectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
index_name: str
es_client: Optional[Any]
es_url: Optional[str]
es_cloud_id: Optional[str]
es_api_key: Optional[str]
es_user: Optional[str]
es_password: Optional[str]
text_field: str = "content"
vector_field: str = "embedding"
batch_size: int = 200
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE"
_client = PrivateAttr()
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
super().__init__(
index_name=index_name,
es_client=es_client,
es_url=es_url,
es_cloud_id=es_cloud_id,
es_api_key=es_api_key,
es_user=es_user,
es_password=es_password,
text_field=text_field,
vector_field=vector_field,
batch_size=batch_size,
distance_strategy=distance_strategy,
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index
return f"llama_index-py-vs/{llama_index.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
        if await self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
        try:
            success, failed = await async_bulk(
                self.client,
                requests,
                chunk_size=self.batch_size,
                stats_only=True,
                refresh=True,
            )
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the vector store query, including the query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the vector store query, including the query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships")
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
if query.mode == VectorStoreQueryMode.HYBRID:
total_rank = sum(top_k_scores)
            # Normalize ranks so that better (lower) ranks map to higher scores.
            top_k_scores = [(total_rank - rank) / total_rank for rank in top_k_scores]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
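# A minimal construction sketch (hedged; the local URL and index name are
# assumptions for a development cluster):
#
#   store = ElasticsearchStore(index_name="my_docs", es_url="http://localhost:9200")
#   store.add(nodes)   # nodes must already carry embeddings
#   result = store.query(VectorStoreQuery(query_embedding=[...], similarity_top_k=3))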
| [
"llama_index.schema.TextNode",
"llama_index.vector_stores.utils.metadata_dict_to_node",
"llama_index.vector_stores.utils.node_to_metadata_dict",
"llama_index.bridge.pydantic.PrivateAttr"
] | [((599, 618), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (608, 618), False, 'from logging import getLogger\n'), ((2444, 2497), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2476, 2497), False, 'import elasticsearch\n'), ((3820, 3836), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3828, 3836), True, 'import numpy as np\n'), ((5375, 5388), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (5386, 5388), False, 'from llama_index.bridge.pydantic import PrivateAttr\n'), ((5894, 5914), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5912, 5914), False, 'import nest_asyncio\n'), ((17510, 17550), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (17514, 17550), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12996, 13071), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (13006, 13071), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((10461, 10485), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (10483, 10485), False, 'import asyncio\n'), ((12388, 12433), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (12409, 12433), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((13143, 13207), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (13153, 13207), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((14081, 14105), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (14103, 14105), False, 'import asyncio\n'), ((16288, 16312), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (16310, 16312), False, 'import asyncio\n'), ((19410, 19441), 'llama_index.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (19431, 19441), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3872, 3894), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3878, 3894), True, 'import numpy as np\n'), ((12622, 12634), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12632, 12634), False, 'import uuid\n'), ((20084, 20227), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (20092, 20227), False, 'from llama_index.schema import BaseNode, MetadataMode, TextNode\n')] |
import llama_index
import chromadb
from importlib.metadata import version
print(f"LlamaIndex version: {version('llama_index')}")
print(f"Chroma version: {version('chromadb')}")
# Load API key from .env file
import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
# Define embedding model and LLM
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core.settings import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
Settings.embed_model = OpenAIEmbedding()
# Load the index with some example data
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader(
input_files=["./data/paul_graham_essay.txt"],
).load_data()
# Chunk documents into nodes
from llama_index.core.node_parser import SentenceWindowNodeParser
# create the sentence window node parser w/ default settings
node_parser = SentenceWindowNodeParser.from_defaults(
window_size=3,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
# Extract nodes from documents
nodes = node_parser.get_nodes_from_documents(documents)
# Build the index
client = chromadb.EphemeralClient()
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
embed_model = HuggingFaceEmbedding(
model_name="BAAI/bge-base-en-v1.5",
device="cuda",
)
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.chroma import ChromaVectorStore
index_name = "MyExternalContent"
# Construct vector store
vector_store = ChromaVectorStore(
chroma_collection=client.create_collection(name=index_name),
)
# Set up the storage for the embeddings
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# Setup the index
# build VectorStoreIndex that takes care of chunking documents
# and encoding chunks to embeddings for future retrieval
index = VectorStoreIndex(
nodes,
storage_context=storage_context,
embed_model=embed_model,
)
# Setup the query engine
from llama_index.core.postprocessor import MetadataReplacementPostProcessor
# The target key defaults to `window` to match the node_parser's default
postproc = MetadataReplacementPostProcessor(
target_metadata_key="window",
)
from llama_index.core.postprocessor import SentenceTransformerRerank
# Define reranker model
rerank = SentenceTransformerRerank(
top_n = 2,
model = "BAAI/bge-reranker-base",
device = "cuda",
)
query_engine = index.as_query_engine(
similarity_top_k = 6,
vector_store_query_mode="hybrid",
alpha=0.5,
node_postprocessors = [postproc, rerank],
)
# Run a query against the advanced RAG implementation (sentence window + reranking)
response = query_engine.query(
"What happened at InterLeaf?",
)
print(response)
window = response.source_nodes[0].node.metadata["window"]
sentence = response.source_nodes[0].node.metadata["original_text"]
print(f"Window: {window}")
print("------------------")
print(f"Original Sentence: {sentence}") | [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.postprocessor.SentenceTransformerRerank",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.core.VectorStoreIndex",
"llama_index.core.postprocessor.MetadataReplacementPostProcessor",
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.openai.OpenAI",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((511, 557), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.1)'}), "(model='gpt-3.5-turbo', temperature=0.1)\n", (517, 557), False, 'from llama_index.llms.openai import OpenAI\n'), ((582, 599), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (597, 599), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((981, 1113), 'llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(3)', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=3, window_metadata_key=\n 'window', original_text_metadata_key='original_text')\n", (1019, 1113), False, 'from llama_index.core.node_parser import SentenceWindowNodeParser\n'), ((1252, 1278), 'chromadb.EphemeralClient', 'chromadb.EphemeralClient', ([], {}), '()\n', (1276, 1278), False, 'import chromadb\n'), ((1367, 1438), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""BAAI/bge-base-en-v1.5"""', 'device': '"""cuda"""'}), "(model_name='BAAI/bge-base-en-v1.5', device='cuda')\n", (1387, 1438), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((1812, 1867), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1840, 1867), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((2020, 2106), 'llama_index.core.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'storage_context': 'storage_context', 'embed_model': 'embed_model'}), '(nodes, storage_context=storage_context, embed_model=\n embed_model)\n', (2036, 2106), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((2316, 2378), 'llama_index.core.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (2348, 2378), False, 'from llama_index.core.postprocessor import MetadataReplacementPostProcessor\n'), ((2497, 2583), 'llama_index.core.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': '(2)', 'model': '"""BAAI/bge-reranker-base"""', 'device': '"""cuda"""'}), "(top_n=2, model='BAAI/bge-reranker-base', device=\n 'cuda')\n", (2522, 2583), False, 'from llama_index.core.postprocessor import SentenceTransformerRerank\n'), ((289, 302), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (300, 302), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((712, 779), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['./data/paul_graham_essay.txt']"}), "(input_files=['./data/paul_graham_essay.txt'])\n", (733, 779), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((108, 130), 'importlib.metadata.version', 'version', (['"""llama_index"""'], {}), "('llama_index')\n", (115, 130), False, 'from importlib.metadata import version\n'), ((160, 179), 'importlib.metadata.version', 'version', (['"""chromadb"""'], {}), "('chromadb')\n", (167, 179), False, 'from importlib.metadata import version\n')] |
import os, openai
from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
# pip install google-search-results
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from Roku_cs_agent import formatter, roku_agent
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["https://www.jgmancilla.com"], # Adjust this to your needs
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
##############################################################################################################
import llama_index, os
from llama_index import ServiceContext, StorageContext
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from llama_index.indices.loading import load_index_from_storage
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0
)
llm_embeddings = OpenAIEmbeddings()
service_context = ServiceContext.from_defaults(
llm=llm, embed_model=llm_embeddings
)
llama_index.set_global_service_context(service_context)
# The other computational tasks
representative_storage_context = StorageContext.from_defaults(persist_dir="index_representative")
personal_index = load_index_from_storage(representative_storage_context)
representative_query_engine = personal_index.as_query_engine()
##############################################################################################################
class Question(BaseModel):
question: str
@app.post('/representative')
def representative(input: Question):
response = representative_query_engine.query(input.question)
return response
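# Example request against the endpoint above (hedged; host and port assume a local
# `uvicorn` run with default settings):
#   curl -X POST http://localhost:8000/representative \
#        -H "Content-Type: application/json" \
#        -d '{"question": "What does Roku do?"}'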
@app.post('/query_cs')
def query(input: Question):
response = roku_agent.run(input.question)
return response
@app.post('/spotlight')
def query_spotlight(input: Question):
response = formatter.query_cs(input.question)
return response | [
"llama_index.set_global_service_context",
"llama_index.indices.loading.load_index_from_storage",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults"
] | [((49, 62), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (60, 62), False, 'from dotenv import load_dotenv\n'), ((80, 112), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (94, 112), False, 'import llama_index, os\n'), ((380, 389), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (387, 389), False, 'from fastapi import FastAPI\n'), ((947, 1000), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (957, 1000), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1030, 1048), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1046, 1048), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1068, 1133), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'llm_embeddings'}), '(llm=llm, embed_model=llm_embeddings)\n', (1096, 1133), False, 'from llama_index import ServiceContext, StorageContext\n'), ((1141, 1196), 'llama_index.set_global_service_context', 'llama_index.set_global_service_context', (['service_context'], {}), '(service_context)\n', (1179, 1196), False, 'import llama_index, os\n'), ((1263, 1327), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""index_representative"""'}), "(persist_dir='index_representative')\n", (1291, 1327), False, 'from llama_index import ServiceContext, StorageContext\n'), ((1345, 1400), 'llama_index.indices.loading.load_index_from_storage', 'load_index_from_storage', (['representative_storage_context'], {}), '(representative_storage_context)\n', (1368, 1400), False, 'from llama_index.indices.loading import load_index_from_storage\n'), ((1841, 1871), 'Roku_cs_agent.roku_agent.run', 'roku_agent.run', (['input.question'], {}), '(input.question)\n', (1855, 1871), False, 'from Roku_cs_agent import formatter, roku_agent\n'), ((1974, 2008), 'Roku_cs_agent.formatter.query_cs', 'formatter.query_cs', (['input.question'], {}), '(input.question)\n', (1992, 2008), False, 'from Roku_cs_agent import formatter, roku_agent\n')] |
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from private_gpt.di import global_injector
from private_gpt.launcher import create_app
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launcher import create_app\n')] |
"""
Astra DB Vector store index.
An index based on a DB table with vector search capabilities,
powered by the astrapy library
"""
import json
import logging
from typing import Any, Dict, List, Optional, cast
from warnings import warn
import llama_index.core
from llama_index.core.bridge.pydantic import PrivateAttr
from astrapy.db import AstraDB
from llama_index.core.indices.query.embedding_utils import get_top_k_mmr_embeddings
from llama_index.core.schema import BaseNode, MetadataMode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
ExactMatchFilter,
FilterOperator,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.core.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
_logger = logging.getLogger(__name__)
DEFAULT_MMR_PREFETCH_FACTOR = 4.0
MAX_INSERT_BATCH_SIZE = 20
NON_INDEXED_FIELDS = ["metadata._node_content", "content"]
class AstraDBVectorStore(BasePydanticVectorStore):
"""
Astra DB Vector Store.
    An abstraction of an Astra table with
    vector-similarity search. Documents, and their embeddings, are stored
    in an Astra table, and a vector-capable index is used for searches.
The table does not need to exist beforehand: if necessary it will
be created behind the scenes.
All Astra operations are done through the astrapy library.
Args:
        collection_name (str): collection name to use. If it does not exist, it will be created.
token (str): The Astra DB Application Token to use.
api_endpoint (str): The Astra DB JSON API endpoint for your database.
embedding_dimension (int): length of the embedding vectors in use.
        namespace (Optional[str]): The namespace to use. If not provided, 'default_keyspace' is used.
ttl_seconds (Optional[int]): expiration time for inserted entries.
Default is no expiration.
"""
stores_text: bool = True
flat_metadata: bool = True
_embedding_dimension: int = PrivateAttr()
_ttl_seconds: Optional[int] = PrivateAttr()
_astra_db: Any = PrivateAttr()
_astra_db_collection: Any = PrivateAttr()
def __init__(
self,
*,
collection_name: str,
token: str,
api_endpoint: str,
embedding_dimension: int,
namespace: Optional[str] = None,
ttl_seconds: Optional[int] = None,
) -> None:
super().__init__()
# Set all the required class parameters
self._embedding_dimension = embedding_dimension
self._ttl_seconds = ttl_seconds
_logger.debug("Creating the Astra DB table")
# Build the Astra DB object
self._astra_db = AstraDB(
api_endpoint=api_endpoint,
token=token,
namespace=namespace,
caller_name=getattr(llama_index, "__name__", "llama_index"),
caller_version=getattr(llama_index.core, "__version__", None),
)
from astrapy.api import APIRequestError
try:
# Create and connect to the newly created collection
self._astra_db_collection = self._astra_db.create_collection(
collection_name=collection_name,
dimension=embedding_dimension,
options={"indexing": {"deny": NON_INDEXED_FIELDS}},
)
except APIRequestError:
# possibly the collection is preexisting and has legacy
# indexing settings: verify
get_coll_response = self._astra_db.get_collections(
options={"explain": True}
)
collections = (get_coll_response["status"] or {}).get("collections") or []
preexisting = [
collection
for collection in collections
if collection["name"] == collection_name
]
if preexisting:
pre_collection = preexisting[0]
# if it has no "indexing", it is a legacy collection;
                # otherwise it's unexpected: warn and proceed at the user's risk
pre_col_options = pre_collection.get("options") or {}
if "indexing" not in pre_col_options:
warn(
(
f"Collection '{collection_name}' is detected as "
"having indexing turned on for all fields "
"(either created manually or by older versions "
"of this plugin). This implies stricter "
"limitations on the amount of text"
" each entry can store. Consider reindexing anew on a"
" fresh collection to be able to store longer texts."
),
UserWarning,
stacklevel=2,
)
self._astra_db_collection = self._astra_db.collection(
collection_name=collection_name,
)
else:
options_json = json.dumps(pre_col_options["indexing"])
warn(
(
f"Collection '{collection_name}' has unexpected 'indexing'"
f" settings (options.indexing = {options_json})."
" This can result in odd behaviour when running "
" metadata filtering and/or unwarranted limitations"
" on storing long texts. Consider reindexing anew on a"
" fresh collection."
),
UserWarning,
stacklevel=2,
)
self._astra_db_collection = self._astra_db.collection(
collection_name=collection_name,
)
else:
# other exception
raise
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to index.
Args:
            nodes: List[BaseNode]: list of nodes with embeddings
"""
# Initialize list of objects to track
nodes_list = []
# Process each node individually
for node in nodes:
# Get the metadata
metadata = node_to_metadata_dict(
node,
remove_text=True,
flat_metadata=self.flat_metadata,
)
# One dictionary of node data per node
nodes_list.append(
{
"_id": node.node_id,
"content": node.get_content(metadata_mode=MetadataMode.NONE),
"metadata": metadata,
"$vector": node.get_embedding(),
}
)
# Log the number of rows being added
_logger.debug(f"Adding {len(nodes_list)} rows to table")
# Initialize an empty list to hold the batches
batched_list = []
# Iterate over the node_list in steps of MAX_INSERT_BATCH_SIZE
for i in range(0, len(nodes_list), MAX_INSERT_BATCH_SIZE):
# Append a slice of node_list to the batched_list
batched_list.append(nodes_list[i : i + MAX_INSERT_BATCH_SIZE])
# Perform the bulk insert
for i, batch in enumerate(batched_list):
_logger.debug(f"Processing batch #{i + 1} of size {len(batch)}")
# Go to astrapy to perform the bulk insert
self._astra_db_collection.insert_many(batch)
# Return the list of ids
return [str(n["_id"]) for n in nodes_list]
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
        Delete nodes using ref_doc_id.
Args:
ref_doc_id (str): The id of the document to delete.
"""
_logger.debug("Deleting a document from the Astra table")
self._astra_db_collection.delete(id=ref_doc_id, **delete_kwargs)
@property
def client(self) -> Any:
"""Return the underlying Astra vector table object."""
return self._astra_db_collection
@staticmethod
def _query_filters_to_dict(query_filters: MetadataFilters) -> Dict[str, Any]:
# Allow only legacy ExactMatchFilter and MetadataFilter with FilterOperator.EQ
if not all(
(
isinstance(f, ExactMatchFilter)
or (isinstance(f, MetadataFilter) and f.operator == FilterOperator.EQ)
)
for f in query_filters.filters
):
raise NotImplementedError(
"Only filters with operator=FilterOperator.EQ are supported"
)
return {f"metadata.{f.key}": f.value for f in query_filters.filters}
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes."""
# Get the currently available query modes
_available_query_modes = [
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.MMR,
]
# Reject query if not available
if query.mode not in _available_query_modes:
raise NotImplementedError(f"Query mode {query.mode} not available.")
# Get the query embedding
query_embedding = cast(List[float], query.query_embedding)
# Process the metadata filters as needed
if query.filters is not None:
query_metadata = self._query_filters_to_dict(query.filters)
else:
query_metadata = {}
# Get the scores depending on the query mode
if query.mode == VectorStoreQueryMode.DEFAULT:
# Call the vector_find method of AstraPy
matches = self._astra_db_collection.vector_find(
vector=query_embedding,
limit=query.similarity_top_k,
filter=query_metadata,
)
            # Get the similarity score associated with each match
top_k_scores = [match["$similarity"] for match in matches]
elif query.mode == VectorStoreQueryMode.MMR:
# Querying a larger number of vectors and then doing MMR on them.
if (
kwargs.get("mmr_prefetch_factor") is not None
and kwargs.get("mmr_prefetch_k") is not None
):
raise ValueError(
"'mmr_prefetch_factor' and 'mmr_prefetch_k' "
"cannot coexist in a call to query()"
)
else:
if kwargs.get("mmr_prefetch_k") is not None:
prefetch_k0 = int(kwargs["mmr_prefetch_k"])
else:
prefetch_k0 = int(
query.similarity_top_k
* kwargs.get("mmr_prefetch_factor", DEFAULT_MMR_PREFETCH_FACTOR)
)
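            # For example (illustrative values), with similarity_top_k=4 and the
            # default mmr_prefetch_factor of 4.0, prefetch_k0 works out to 16
            # candidate vectors fetched before MMR re-ranking.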
# Get the most we can possibly need to fetch
prefetch_k = max(prefetch_k0, query.similarity_top_k)
# Call AstraPy to fetch them
prefetch_matches = self._astra_db_collection.vector_find(
vector=query_embedding,
limit=prefetch_k,
filter=query_metadata,
)
# Get the MMR threshold
mmr_threshold = query.mmr_threshold or kwargs.get("mmr_threshold")
# If we have found documents, we can proceed
if prefetch_matches:
zipped_indices, zipped_embeddings = zip(
*enumerate(match["$vector"] for match in prefetch_matches)
)
pf_match_indices, pf_match_embeddings = list(zipped_indices), list(
zipped_embeddings
)
else:
pf_match_indices, pf_match_embeddings = [], []
# Call the Llama utility function to get the top k
mmr_similarities, mmr_indices = get_top_k_mmr_embeddings(
query_embedding,
pf_match_embeddings,
similarity_top_k=query.similarity_top_k,
embedding_ids=pf_match_indices,
mmr_threshold=mmr_threshold,
)
# Finally, build the final results based on the mmr values
matches = [prefetch_matches[mmr_index] for mmr_index in mmr_indices]
top_k_scores = mmr_similarities
# We have three lists to return
top_k_nodes = []
top_k_ids = []
# Get every match
for match in matches:
# Check whether we have a llama-generated node content field
if "_node_content" not in match["metadata"]:
match["metadata"]["_node_content"] = json.dumps(match)
# Create a new node object from the node metadata
node = metadata_dict_to_node(match["metadata"], text=match["content"])
# Append to the respective lists
top_k_nodes.append(node)
top_k_ids.append(match["_id"])
# return our final result
return VectorStoreQueryResult(
nodes=top_k_nodes,
similarities=top_k_scores,
ids=top_k_ids,
)
| [
"llama_index.core.indices.query.embedding_utils.get_top_k_mmr_embeddings",
"llama_index.core.bridge.pydantic.PrivateAttr",
"llama_index.core.vector_stores.utils.node_to_metadata_dict",
"llama_index.core.vector_stores.utils.metadata_dict_to_node",
"llama_index.core.vector_stores.types.VectorStoreQueryResult"
] | [((852, 879), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (869, 879), False, 'import logging\n'), ((2070, 2083), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2081, 2083), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2118, 2131), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2129, 2131), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2153, 2166), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2164, 2166), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2199, 2212), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2210, 2212), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((9525, 9565), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (9529, 9565), False, 'from typing import Any, Dict, List, Optional, cast\n'), ((13259, 13347), 'llama_index.core.vector_stores.types.VectorStoreQueryResult', 'VectorStoreQueryResult', ([], {'nodes': 'top_k_nodes', 'similarities': 'top_k_scores', 'ids': 'top_k_ids'}), '(nodes=top_k_nodes, similarities=top_k_scores, ids=\n top_k_ids)\n', (13281, 13347), False, 'from llama_index.core.vector_stores.types import BasePydanticVectorStore, ExactMatchFilter, FilterOperator, MetadataFilter, MetadataFilters, VectorStoreQuery, VectorStoreQueryMode, VectorStoreQueryResult\n'), ((6510, 6589), 'llama_index.core.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (6531, 6589), False, 'from llama_index.core.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((13019, 13082), 'llama_index.core.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (["match['metadata']"], {'text': "match['content']"}), "(match['metadata'], text=match['content'])\n", (13040, 13082), False, 'from llama_index.core.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((12133, 12305), 'llama_index.core.indices.query.embedding_utils.get_top_k_mmr_embeddings', 'get_top_k_mmr_embeddings', (['query_embedding', 'pf_match_embeddings'], {'similarity_top_k': 'query.similarity_top_k', 'embedding_ids': 'pf_match_indices', 'mmr_threshold': 'mmr_threshold'}), '(query_embedding, pf_match_embeddings,\n similarity_top_k=query.similarity_top_k, embedding_ids=pf_match_indices,\n mmr_threshold=mmr_threshold)\n', (12157, 12305), False, 'from llama_index.core.indices.query.embedding_utils import get_top_k_mmr_embeddings\n'), ((12919, 12936), 'json.dumps', 'json.dumps', (['match'], {}), '(match)\n', (12929, 12936), False, 'import json\n'), ((4284, 4638), 'warnings.warn', 'warn', (['f"""Collection \'{collection_name}\' is detected as having indexing turned on for all fields (either created manually or by older versions of this plugin). This implies stricter limitations on the amount of text each entry can store. Consider reindexing anew on a fresh collection to be able to store longer texts."""', 'UserWarning'], {'stacklevel': '(2)'}), '(\n f"Collection \'{collection_name}\' is detected as having indexing turned on for all fields (either created manually or by older versions of this plugin). This implies stricter limitations on the amount of text each entry can store. 
Consider reindexing anew on a fresh collection to be able to store longer texts."\n , UserWarning, stacklevel=2)\n', (4288, 4638), False, 'from warnings import warn\n'), ((5177, 5216), 'json.dumps', 'json.dumps', (["pre_col_options['indexing']"], {}), "(pre_col_options['indexing'])\n", (5187, 5216), False, 'import json\n'), ((5237, 5553), 'warnings.warn', 'warn', (['f"""Collection \'{collection_name}\' has unexpected \'indexing\' settings (options.indexing = {options_json}). This can result in odd behaviour when running metadata filtering and/or unwarranted limitations on storing long texts. Consider reindexing anew on a fresh collection."""', 'UserWarning'], {'stacklevel': '(2)'}), '(\n f"Collection \'{collection_name}\' has unexpected \'indexing\' settings (options.indexing = {options_json}). This can result in odd behaviour when running metadata filtering and/or unwarranted limitations on storing long texts. Consider reindexing anew on a fresh collection."\n , UserWarning, stacklevel=2)\n', (5241, 5553), False, 'from warnings import warn\n')] |
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end-to-end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.core.base.response.schema import Response
from llama_index.core.bridge.pydantic import BaseModel # type: ignore
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.query.schema import QueryBundle
from llama_index.core.llms.mock import MockLLM
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.core.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.core.types import RESPONSE_TEXT_TYPE
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
    a response that is grounded in the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
llm=MockLLM(),
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
            responder = GoogleTextSynthesizer.from_defaults(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
the grounded response. These passages will always have no score,
            which is the only way to mark them as attributed passages. Then, the list
will follow with the originally provided passages, which will have
a score from the retrieval.
            Response's `metadata` may also have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.core.indices.query.schema.QueryBundle",
"llama_index.core.llms.mock.MockLLM",
"llama_index.vector_stores.google.genai_extension.build_generative_service",
"llama_index.core.schema.TextNode",
"llama_index.core.base.response.schema.Response"
] | [((1057, 1084), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1074, 1084), False, 'import logging\n'), ((2707, 2740), 'llama_index.vector_stores.google.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2738, 2740), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((4779, 4828), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4783, 4828), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6722, 6748), 'llama_index.core.base.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6730, 6748), False, 'from llama_index.core.base.response.schema import Response\n'), ((6805, 6833), 'llama_index.core.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6816, 6833), False, 'from llama_index.core.indices.query.schema import QueryBundle\n'), ((2596, 2605), 'llama_index.core.llms.mock.MockLLM', 'MockLLM', ([], {}), '()\n', (2603, 2605), False, 'from llama_index.core.llms.mock import MockLLM\n'), ((8244, 8266), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8252, 8266), False, 'from llama_index.core.schema import MetadataMode, NodeWithScore, TextNode\n')] |
from langfuse import Langfuse
from llama_index.llms.openai import OpenAI
import llama_index.core
llama_index.core.set_global_handler("langfuse")
from llama_index.core.llms import ChatMessage
langfuse = Langfuse()
dataset = langfuse.get_dataset("term-extraction")
prompt = langfuse.get_prompt("extraction-prompt-1")
model = OpenAI(model="gpt-4-turbo-preview")
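# Compile the stored prompt for each dataset item, run it through the model,
# and link the resulting generation back to the item so the run can be
# reviewed in Langfuse.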
for item in dataset.items:
compiled_prompt = prompt.compile(input=item.input)
    generation = langfuse.generation(prompt=prompt, model=model.model)
messages = [
ChatMessage(role="system", content="You are an API that must always respond with a json without any formatting."),
ChatMessage(role="user", content=compiled_prompt),
]
chat_completion = model.chat(messages)
print(chat_completion)
item.link(generation, "gpt-4-with-api-instructions")
generation.end(output=chat_completion)
# item.link(generation, "first-run-extraction") | [
"llama_index.core.llms.ChatMessage",
"llama_index.llms.openai.OpenAI"
] | [((203, 213), 'langfuse.Langfuse', 'Langfuse', ([], {}), '()\n', (211, 213), False, 'from langfuse import Langfuse\n'), ((325, 360), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-turbo-preview"""'}), "(model='gpt-4-turbo-preview')\n", (331, 360), False, 'from llama_index.llms.openai import OpenAI\n'), ((530, 653), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': '"""system"""', 'content': '"""You are an API that must always respond with a json without any formatting."""'}), "(role='system', content=\n 'You are an API that must always respond with a json without any formatting.'\n )\n", (541, 653), False, 'from llama_index.core.llms import ChatMessage\n'), ((651, 700), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': '"""user"""', 'content': 'compiled_prompt'}), "(role='user', content=compiled_prompt)\n", (662, 700), False, 'from llama_index.core.llms import ChatMessage\n')] |
from unittest.mock import MagicMock, patch
import pytest
from llama_index.legacy.core.response.schema import Response
from llama_index.legacy.schema import Document
try:
import google.ai.generativelanguage as genai
has_google = True
except ImportError:
has_google = False
from llama_index.legacy.indices.managed.google.generativeai import (
GoogleIndex,
set_google_config,
)
SKIP_TEST_REASON = "Google GenerativeAI is not installed"
if has_google:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
set_google_config(
api_endpoint="No-such-endpoint-to-prevent-hitting-real-backend",
testing=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.auth.credentials.Credentials")
def test_set_google_config(mock_credentials: MagicMock) -> None:
set_google_config(auth_credentials=mock_credentials)
config = genaix.get_config()
assert config.auth_credentials == mock_credentials
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_from_corpus(mock_get_corpus: MagicMock) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
# Act
store = GoogleIndex.from_corpus(corpus_id="123")
# Assert
assert store.corpus_id == "123"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_corpus")
def test_create_corpus(mock_create_corpus: MagicMock) -> None:
def fake_create_corpus(request: genai.CreateCorpusRequest) -> genai.Corpus:
return request.corpus
# Arrange
mock_create_corpus.side_effect = fake_create_corpus
# Act
store = GoogleIndex.create_corpus(display_name="My first corpus")
# Assert
assert len(store.corpus_id) > 0
assert mock_create_corpus.call_count == 1
request = mock_create_corpus.call_args.args[0]
assert request.corpus.name == f"corpora/{store.corpus_id}"
assert request.corpus.display_name == "My first corpus"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_corpus")
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_document")
@patch("google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_document")
def test_from_documents(
mock_get_document: MagicMock,
mock_batch_create_chunk: MagicMock,
mock_create_document: MagicMock,
mock_create_corpus: MagicMock,
) -> None:
from google.api_core import exceptions as gapi_exception
def fake_create_corpus(request: genai.CreateCorpusRequest) -> genai.Corpus:
return request.corpus
# Arrange
mock_get_document.side_effect = gapi_exception.NotFound("")
mock_create_corpus.side_effect = fake_create_corpus
mock_create_document.return_value = genai.Document(name="corpora/123/documents/456")
mock_batch_create_chunk.side_effect = [
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/777"),
]
),
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/888"),
]
),
]
# Act
index = GoogleIndex.from_documents(
[
Document(text="Hello, my darling"),
Document(text="Goodbye, my baby"),
]
)
# Assert
assert mock_create_corpus.call_count == 1
create_corpus_request = mock_create_corpus.call_args.args[0]
assert create_corpus_request.corpus.name == f"corpora/{index.corpus_id}"
create_document_request = mock_create_document.call_args.args[0]
assert create_document_request.parent == f"corpora/{index.corpus_id}"
assert mock_batch_create_chunk.call_count == 2
first_batch_request = mock_batch_create_chunk.call_args_list[0].args[0]
assert (
first_batch_request.requests[0].chunk.data.string_value == "Hello, my darling"
)
second_batch_request = mock_batch_create_chunk.call_args_list[1].args[0]
assert (
second_batch_request.requests[0].chunk.data.string_value == "Goodbye, my baby"
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.query_corpus")
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_as_query_engine(
mock_get_corpus: MagicMock,
mock_generate_answer: MagicMock,
mock_query_corpus: MagicMock,
) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
mock_query_corpus.return_value = genai.QueryCorpusResponse(
relevant_chunks=[
genai.RelevantChunk(
chunk=genai.Chunk(
name="corpora/123/documents/456/chunks/789",
data=genai.ChunkData(string_value="It's 42"),
),
chunk_relevance_score=0.9,
)
]
)
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/777",
part_index=0,
)
),
),
genai.GroundingAttribution(
content=genai.Content(parts=[genai.Part(text="Or maybe not")]),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/888",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.9,
)
# Act
index = GoogleIndex.from_corpus(corpus_id="123")
query_engine = index.as_query_engine(
answer_style=genai.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE
)
response = query_engine.query("What is the meaning of life?")
# Assert
assert mock_query_corpus.call_count == 1
query_corpus_request = mock_query_corpus.call_args.args[0]
assert query_corpus_request.name == "corpora/123"
assert query_corpus_request.query == "What is the meaning of life?"
assert isinstance(response, Response)
assert response.response == "42"
assert mock_generate_answer.call_count == 1
generate_answer_request = mock_generate_answer.call_args.args[0]
assert (
generate_answer_request.contents[0].parts[0].text
== "What is the meaning of life?"
)
assert (
generate_answer_request.answer_style
== genai.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE
)
passages = generate_answer_request.inline_passages.passages
assert len(passages) == 1
passage = passages[0]
assert passage.content.parts[0].text == "It's 42"
| [
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config",
"llama_index.legacy.indices.managed.google.generativeai.set_google_config",
"llama_index.legacy.schema.Document",
"llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus",
"llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.create_corpus"
] | [((693, 752), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (711, 752), False, 'import pytest\n'), ((754, 798), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credentials.Credentials')\n", (759, 798), False, 'from unittest.mock import MagicMock, patch\n'), ((1012, 1071), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1030, 1071), False, 'import pytest\n'), ((1073, 1144), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (1078, 1144), False, 'from unittest.mock import MagicMock, patch\n'), ((1402, 1461), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1420, 1461), False, 'import pytest\n'), ((1463, 1537), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_corpus')\n", (1468, 1537), False, 'from unittest.mock import MagicMock, patch\n'), ((2137, 2196), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (2155, 2196), False, 'import pytest\n'), ((2198, 2272), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_corpus')\n", (2203, 2272), False, 'from unittest.mock import MagicMock, patch\n'), ((2274, 2350), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_document')\n", (2279, 2350), False, 'from unittest.mock import MagicMock, patch\n'), ((2352, 2437), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks'\n )\n", (2357, 2437), False, 'from unittest.mock import MagicMock, patch\n'), ((2434, 2507), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_document')\n", (2439, 2507), False, 'from unittest.mock import MagicMock, patch\n'), ((4398, 4457), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (4416, 4457), False, 'import pytest\n'), ((4459, 4532), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.query_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.query_corpus')\n", (4464, 4532), False, 'from unittest.mock import MagicMock, patch\n'), ((4534, 4611), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (4539, 4611), False, 'from unittest.mock import MagicMock, patch\n'), ((4613, 4684), 'unittest.mock.patch', 'patch', 
(['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (4618, 4684), False, 'from unittest.mock import MagicMock, patch\n'), ((570, 671), 'llama_index.legacy.indices.managed.google.generativeai.set_google_config', 'set_google_config', ([], {'api_endpoint': '"""No-such-endpoint-to-prevent-hitting-real-backend"""', 'testing': '(True)'}), "(api_endpoint=\n 'No-such-endpoint-to-prevent-hitting-real-backend', testing=True)\n", (587, 671), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((868, 920), 'llama_index.legacy.indices.managed.google.generativeai.set_google_config', 'set_google_config', ([], {'auth_credentials': 'mock_credentials'}), '(auth_credentials=mock_credentials)\n', (885, 920), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((934, 953), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config', 'genaix.get_config', ([], {}), '()\n', (951, 953), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((1252, 1284), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (1264, 1284), True, 'import google.ai.generativelanguage as genai\n'), ((1308, 1348), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (1331, 1348), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((1805, 1862), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.create_corpus', 'GoogleIndex.create_corpus', ([], {'display_name': '"""My first corpus"""'}), "(display_name='My first corpus')\n", (1830, 1862), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((2913, 2940), 'google.api_core.exceptions.NotFound', 'gapi_exception.NotFound', (['""""""'], {}), "('')\n", (2936, 2940), True, 'from google.api_core import exceptions as gapi_exception\n'), ((3037, 3085), 'google.ai.generativelanguage.Document', 'genai.Document', ([], {'name': '"""corpora/123/documents/456"""'}), "(name='corpora/123/documents/456')\n", (3051, 3085), True, 'import google.ai.generativelanguage as genai\n'), ((4874, 4906), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (4886, 4906), True, 'import google.ai.generativelanguage as genai\n'), ((6624, 6664), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (6647, 6664), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((3531, 3565), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': '"""Hello, my darling"""'}), "(text='Hello, my darling')\n", (3539, 3565), False, 'from llama_index.legacy.schema import Document\n'), ((3579, 3612), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': '"""Goodbye, my baby"""'}), "(text='Goodbye, my baby')\n", (3587, 3612), False, 'from llama_index.legacy.schema import Document\n'), ((3208, 3264), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': 
'"""corpora/123/documents/456/chunks/777"""'}), "(name='corpora/123/documents/456/chunks/777')\n", (3219, 3264), True, 'import google.ai.generativelanguage as genai\n'), ((3369, 3425), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': '"""corpora/123/documents/456/chunks/888"""'}), "(name='corpora/123/documents/456/chunks/888')\n", (3380, 3425), True, 'import google.ai.generativelanguage as genai\n'), ((5155, 5194), 'google.ai.generativelanguage.ChunkData', 'genai.ChunkData', ([], {'string_value': '"""It\'s 42"""'}), '(string_value="It\'s 42")\n', (5170, 5194), True, 'import google.ai.generativelanguage as genai\n'), ((5431, 5452), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (5441, 5452), True, 'import google.ai.generativelanguage as genai\n'), ((5775, 5889), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/777"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/777', part_index=0)\n", (5819, 5889), True, 'import google.ai.generativelanguage as genai\n'), ((6237, 6351), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/888"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/888', part_index=0)\n", (6281, 6351), True, 'import google.ai.generativelanguage as genai\n'), ((5611, 5651), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42"""'}), "(text='Meaning of life is 42')\n", (5621, 5651), True, 'import google.ai.generativelanguage as genai\n'), ((6103, 6134), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Or maybe not"""'}), "(text='Or maybe not')\n", (6113, 6134), True, 'import google.ai.generativelanguage as genai\n')] |