code
stringlengths 161
67.2k
| apis
sequencelengths 1
24
| extract_api
stringlengths 164
53.3k
|
---|---|---|
#!/usr/bin/env python3
from flask import Flask, request
from werkzeug.utils import secure_filename
from llama_index import GPTSimpleVectorIndex, download_loader
import json
import secrets
app = Flask(__name__)
@app.route('/index', methods = ['GET', 'POST'])
def upload_and_index():
if request.method == "POST":
f = request.files['file']
filename = f"./uploads/{secure_filename(f.filename)}"
f.save(filename)
RDFReader = download_loader('RDFReader')
document = RDFReader().load_data(file=filename)
# avoid collisions of filenames
data_id = secrets.token_hex(15)
index = GPTSimpleVectorIndex(document)
index.save_to_disk(f"{data_id}.json")
return {'id': data_id}
@app.route('/query')
def query():
args = request.args
data_id = args.get('id')
query_str = args.get('query')
q_index = GPTSimpleVectorIndex.load_from_disk(f"{data_id}.json")
result = q_index.query(f"{query_str} - return the answer and explanation in a JSON object")
try:
json_start = result.response.index('{')
answer = json.loads(result.response[json_start:])
answer.update({'success': True})
except (ValueError, json.JSONDecodeError):
answer = {'success': False, 'answer': result.response, 'explanation': ''}
return json.dumps(answer)
@app.route('/')
def hello():
return 'Hello, World!'
def run_app():
app.run(host='0.0.0.0', port=5050)
if __name__ == '__main__':
run_app()
| [
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.download_loader",
"llama_index.GPTSimpleVectorIndex"
] | [((199, 214), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (204, 214), False, 'from flask import Flask, request\n'), ((893, 947), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['f"""{data_id}.json"""'], {}), "(f'{data_id}.json')\n", (928, 947), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((1342, 1360), 'json.dumps', 'json.dumps', (['answer'], {}), '(answer)\n', (1352, 1360), False, 'import json\n'), ((464, 492), 'llama_index.download_loader', 'download_loader', (['"""RDFReader"""'], {}), "('RDFReader')\n", (479, 492), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((608, 629), 'secrets.token_hex', 'secrets.token_hex', (['(15)'], {}), '(15)\n', (625, 629), False, 'import secrets\n'), ((647, 677), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['document'], {}), '(document)\n', (667, 677), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((1119, 1159), 'json.loads', 'json.loads', (['result.response[json_start:]'], {}), '(result.response[json_start:])\n', (1129, 1159), False, 'import json\n'), ((388, 415), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (403, 415), False, 'from werkzeug.utils import secure_filename\n')] |
from contextlib import contextmanager
import uuid
import os
import tiktoken
from . import S2_tools as scholar
import csv
import sys
import requests
# pdf loader
from langchain.document_loaders import OnlinePDFLoader
## paper questioning tools
from llama_index import Document
from llama_index.vector_stores import PineconeVectorStore
from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext
from llama_index.embeddings.openai import OpenAIEmbedding
def PaperSearchAndDownload(query):
# make new workspace
if not os.path.exists( os.path.join(os.getcwd(),'workspaces') ): os.mkdir(os.path.join(os.getcwd(),'workspaces'))
workspace_dir_name = os.path.join(os.getcwd(),'workspaces',query.split()[0] + '_'+ str(uuid.uuid4().hex))
os.mkdir(workspace_dir_name)
os.mkdir(os.path.join(workspace_dir_name,'results'))
os.mkdir(os.path.join(workspace_dir_name,'refy_suggestions'))
os.environ['workspace'] = workspace_dir_name
# 1) search papers
print(' 1) Searching base papers')
papers = scholar.find_paper_from_query(query, result_limit=10)
if len(papers == 0):
papers = scholar.find_paper_from_query(query, result_limit=50)
scholar.update_dataframe(incomplete=papers, dest=os.path.join(workspace_dir_name, 'results','papers.csv'))
delete_duplicates_from_csv(csv_file=os.path.join(workspace_dir_name, 'results','papers.csv'))
# 2) Cross-reference reccomendation system:
# a paper is reccomended if and only if it's related to more than one paper
print('\n\n 2) Expanding with Scholar reccomendations')
counts = {}
candidates = {}
for paper in papers:
guesses = scholar.find_recommendations(paper)
for guess in guesses:
if not guess['isOpenAccess']: continue
candidates[guess['title']] = guess
if guess['title'] not in counts.keys(): counts[guess['title']] = 1
else: counts[guess['title']] += 1
# reccomend only papers that appeared more than once
reccomends = []
for key in counts:
if counts[key]>1: reccomends.append(candidates[key])
print(f'found {len(reccomends)} additional papers')
# update the csv
scholar.update_dataframe(incomplete= reccomends, dest=os.path.join(workspace_dir_name, 'results','papers.csv'))
delete_duplicates_from_csv(csv_file=os.path.join(workspace_dir_name, 'results','papers.csv'))
# download the papers (1/2)
print('downloading papers (1/2)')
with open(os.path.join(workspace_dir_name,'results','papers.csv'), 'r',encoding='utf-8') as fp:
csvfile = csv.DictReader(fp)
scholar.download_pdf_from_id(" ".join( row['paperId'] for row in csvfile), workspace_dir_name)
scholar.write_bib_file(csv_file=os.path.join(workspace_dir_name,'results','papers.csv'), bib_file=os.path.join(workspace_dir_name,'results','papers.bib'))
# expand further with refy reccomendendation system
print('\n\n 3) Expanding with Refy reccomendendation system')
print('this might take a while...')
scholar.refy_reccomend(bib_path=os.path.join(workspace_dir_name,'results','papers.bib'))
with open(os.path.join(workspace_dir_name, 'refy_suggestions', 'test.csv'), 'r',encoding='utf-8') as fp:
csvfile = csv.DictReader(fp)
for row in csvfile:
title = scholar.replace_non_alphanumeric(row['title'])
title = title.replace(" ","_")
save_path = os.path.join(workspace_dir_name,'refy_suggestions',(title+'.pdf'))
try:
download_paper(url=row['url'], save_path=save_path)
except:
print(f'couldn t download {row}')
return f'{os.path.join(os.getcwd(), workspace_dir_name)}'
import urllib
def download_paper(url, save_path=f"{uuid.uuid4().hex}.pdf"):
success_string = f"paper saved successfully at {os.path.join(os.path.abspath(save_path))}"
if url.endswith('.pdf'):
urllib.request.urlretrieve(url, save_path)
return success_string
if 'doi' in url:
doi = paper_id = "/".join(url.split("/")[-2:])
# Construct the Crossref API URL
print(doi)
doi_url = f"https://doi.org/{doi}"
# Send a GET request to the doi.org URL
response = requests.get(doi_url, allow_redirects=True)
# Check if the request was successful
if response.status_code == 200:
# Extract the final URL after redirection
url = response.url
if 'arxiv' in url:
# URL del paper su arXiv
# Ottieni l'ID del paper dall'URL
paper_id = url.split("/")[-1]
# Costruisci l'URL di download del paper
pdf_url = f"http://arxiv.org/pdf/{paper_id}.pdf"
# Scarica il paper in formato PDF
urllib.request.urlretrieve(pdf_url, save_path)
return success_string
else:
if '/full' in url:
urllib.request.urlretrieve(url.replace('/full','/pdf'))
return success_string
if 'plos.org' in url:
final_url = url.replace('article?', 'article/file?')
urllib.request.urlretrieve(final_url, save_path)
return success_string
return f'\nfailed to download {url}'
def download_bibtex_library(csv_path):
with open(csv_path, 'r',encoding='utf-8') as fp:
csvfile = csv.DictReader(fp)
for row in csvfile:
title = scholar.replace_non_alphanumeric(row['title'])
title = title.replace(" ","-")
save_path = os.path.join(os.path.join(csv_path, '..', title+'.pdf'))
try:
download_paper(url=row['url'], save_path=save_path)
except:
try:
download_paper(url=row['url']+'.pdf', save_path=save_path)
except:
print(f'couldn t download {row}')
def generate_chunks(text, CHUNK_LENGTH = 4000):
enc = tiktoken.encoding_for_model("gpt-4")
tokens = enc.encode(text)
token_chunks = [tokens[i:i + CHUNK_LENGTH] for i in range(0, len(tokens), CHUNK_LENGTH)]
word_chunks = [enc.decode(chunk) for chunk in token_chunks]
return word_chunks
from langchain.vectorstores import Chroma, Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
import pinecone
import langid
import time
# def process_pdf_folder(folder_path):
# if not os.path.exists(folder_path):
# return 'the folder does not exist, check your spelling'
# for item in os.listdir(folder_path):
# if not item.endswith('.pdf'):continue
# with open(os.path.join(folder_path,'SUMMARY.txt'), 'a', encoding='UTF-8') as write_file:
# write_file.write(item)
# write_file.write("\n\n\n")
# txt = summarize_pdf(item, model='Vicuna')
# try:
# write_file.write(txt)
# except:
# print(txt)
# with open(os.path.join(folder_path,'SUMMARY.txt'), 'r', encoding='UTF-8') as read_file:
# return read_file.read()
# # def summarize_pdf(pdf_path, model= None):
# text = readPDF(pdf_path)
# # according to the TLDR Model, consider smaller chunks
# text_chunks = generate_chunks(text, 700)
# if model is not None:
# summarizer = LocalSearchEngine(tldr_model=model)
# summary=''
# for chunk in text_chunks:
# summary += summarizer.tldr(chunk)
# return summary
def get_result_path(path, exclude = []):
for item in os.listdir(path):
if item == 'papers.csv':
return os.path.join(path, item)
if os.path.isdir(os.path.join(path, item)) and item not in exclude:
res = get_result_path(os.path.join(path, item))
if res: return res
return
def get_workspace_titles(workspace_name):
csv_file_path = get_result_path(workspace_name)
papers_available = []
with open(csv_file_path, 'r', encoding='utf-8') as file:
csv_file = csv.DictReader(file)
for row in csv_file:
papers_available.append(row['title'])
return papers_available
import re
def same_title(title1, title2):
try:
title1 = re.sub(r'[^a-zA-Z]', ' ', title1)
title2 = re.sub(r'[^a-zA-Z]', ' ', title2)
except:
return False
words1 = set(title1.lower().split())
words2 = set(title2.lower().split())
return words1 == words2 or words1 <= words2 or words1 >= words2
def glimpse_pdf(title):
# find papers.csv in workspace
for workspace_name in os.listdir('workspaces'):
csv_file_path = get_result_path(workspace_name)
if csv_file_path is None: return 'no paper found'
with open(csv_file_path, 'r', encoding='utf-8') as file:
csv_file = csv.DictReader(file)
for row in csv_file:
if same_title(row['title'], title): return f"{row['title']}, paperId: {row['paperId']}, summary: {row['abstract']}"
return f'\nno paper found with title {title}'
def count_tokens(text):
enc = tiktoken.encoding_for_model("gpt-4")
tokens = enc.encode(text)
return len(tokens)
def readPDF(pdf_path):
loader = OnlinePDFLoader(pdf_path)
data = loader.load()
text_content = ''
for page in data:
formatted_content = page.page_content.replace('\n\n', ' ')
text_content+=formatted_content
return text_content
def get_pdf_path(dir, exclude=[]):
paths = []
for item in os.listdir(dir):
itempath = os.path.join(dir,item)
if item.endswith('.pdf'): paths.append(itempath)
if os.path.isdir(itempath)and item not in exclude:
subpaths = get_pdf_path(itempath)
for i in subpaths: paths.append(i)
return paths
def delete_duplicates_from_csv(csv_file):
print('verifying duplicates...')
to_delete = []
def delete_csv_row_by_title(csv_file, title):
# Read the CSV file and store rows in a list
with open(csv_file, 'r',encoding='UTF-8') as file:
reader = csv.DictReader(file)
rows = list(reader)
# Find the row index with the matching title
row_index = None
for index, row in enumerate(rows):
if row['title'] == title:
row_index = index
break
# If no matching title is found, return
if row_index is None:
print(f"No row with title '{title}' found.")
return
# Remove the row from the list
del rows[row_index]
# Write the updated rows back to the CSV file
with open(csv_file, 'w', newline='',encoding='UTF-8') as file:
fieldnames = reader.fieldnames
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
writer.writerows(rows)
with open(csv_file, 'r', encoding='UTF-8') as file:
DELETED = 0
reader = csv.DictReader(file)
rows = list(reader)
entries = set()
for row in rows:
if row['title']=='' or row['title'] is None: continue
if row['title'] not in entries:entries.add(row['title'])
else:
DELETED+=1
to_delete.append(row['title'])
for title in to_delete: delete_csv_row_by_title(csv_file, title=title)
print(f"Deleted {DELETED} duplicates")
return
def update_workspace_dataframe(workspace, verbose = True):
ADDED = 0
# find results.csv
csv_path = get_result_path(workspace)
# get titles in csv
titles = get_workspace_titles(workspace)
# get local papers path
paths = get_pdf_path(workspace, exclude='refy_suggestions')
# adding new to csv:
for path in paths:
exists = False
# extract the title from the local paper
title = scholar.extract_title(path)
for t in titles:
if same_title(t,title): exists = True
# add it to dataframe if it was not found on the DF
if not exists:
if verbose: print(f"\nnew paper detected: {title}")
# find it with online
paper = scholar.find_paper_online(path)
if paper :
if verbose: print(f"\t---> best match found online: {paper['title']} " )
for t in titles:
if same_title(paper['title'], title):
if verbose: print(f"\t this paper is already present in the dataframe. skipping")
else:
if verbose: print(path, '-x-> no match found')
continue
with open(csv_path, 'a', encoding='utf-8') as fp:
areYouSure = True
for t in titles:
if same_title(t,paper['title']): areYouSure =False
if not areYouSure:
if verbose: print(f"double check revealed that the paper is already in the dataframe. Skipping")
continue
if verbose: print(f"\t---> adding {paper['title']}")
ADDED +=1
paper_authors = paper.get('authors', [])
journal_data = {}
if 'journal' in paper:
journal_data = paper.get('journal',[])
if journal_data is not None:
if 'name' not in journal_data: journal_data['name'] = ''
if 'pages' not in journal_data: journal_data['pages'] = ''
if paper.get('tldr',[]) != []:tldr = paper['tldr']['text']
elif paper.get('summary',[]) != []:tldr = paper['summary']
elif 'abstract' in paper:tldr = paper['abstract']
else: tldr = 'No summary available'
if 'year' in paper:
year = paper['year']
elif 'updated' in paper:year = paper['updated']
else: year = ''
if 'citationStyles' in paper:
if 'bibtex' in paper['citationStyles']: citStyle = paper['citationStyles']['bibtex']
else: citStyle = paper['citationStyles'][0]
else: citStyle = ''
csvfile = csv.DictWriter(fp, ['paperId', 'title', 'first_author', 'year', 'abstract','tldr','bibtex','influentialCitationCount','venue','journal','pages'])
try:
csvfile.writerow({
'title': paper['title'],
'first_author': paper_authors[0]['name'] if paper_authors else '',
'year': year,
'abstract': paper['abstract'] if 'abstract' in paper else '',
'paperId': paper['paperId'] if 'paperId' in paper else '',
'tldr':tldr,
'bibtex':citStyle,
'influentialCitationCount': paper['influentialCitationCount'] if 'influentialCitationCount' in paper else '0',
'venue':paper['venue'] if 'venue' in paper else '',
'journal':journal_data['name'] if journal_data is not None else '',
'pages':journal_data['pages'] if journal_data is not None else '',
})
except Exception as e:
if verbose: print('could not add ', title, '\n',e)
# delete dupes if present
if verbose: print(f"\n\nCSV UPDATE: Added {ADDED} new papers")
# clean form dupes
delete_duplicates_from_csv(csv_path)
# update bib
scholar.write_bib_file(csv_path)
return
def load_workspace(folderdir):
docs =[]
for item in os.listdir(folderdir):
if item.endswith('.pdf'):
print(f' > loading {item}')
with suppress_stdout():
content = readPDF(os.path.join(folderdir, item))
docs.append(Document(
text = content,
doc_id = uuid.uuid4().hex
))
if item =='.'or item =='..':continue
if os.path.isdir( os.path.join(folderdir,item) ):
sub_docs = load_workspace(os.path.join(folderdir,item))
for doc in sub_docs:
docs.append(doc)
return docs
# List paths of all pdf files in a folder
def list_workspace_elements(folderdir):
docs =[]
for item in os.listdir(folderdir):
if item.endswith('.pdf'):
docs.append(rf"{os.path.join(folderdir,item)}")
if item =='.'or item =='..':continue
if os.path.isdir( os.path.join(folderdir,item) ):
sub_docs = list_workspace_elements(os.path.join(folderdir,item))
for doc in sub_docs:
docs.append(doc)
return docs
def llama_query_engine(docs:list, pinecone_index_name:str):
pinecone.init(
api_key= os.environ['PINECONE_API_KEY'],
environment= os.environ['PINECONE_API_ENV']
)
# Find the pinecone index
if pinecone_index_name not in pinecone.list_indexes():
# we create a new index
pinecone.create_index(
name=pinecone_index_name,
metric='dotproduct',
dimension=1536 # 1536 dim of text-embedding-ada-002
)
index = pinecone.Index(pinecone_index_name)
# init it
vector_store = PineconeVectorStore(pinecone_index=index)
time.sleep(1)
# setup our storage (vector db)
storage_context = StorageContext.from_defaults(
vector_store=vector_store
)
embed_model = OpenAIEmbedding(model='text-embedding-ada-002', embed_batch_size=100)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
# populate the vector store
LamaIndex = GPTVectorStoreIndex.from_documents(
docs, storage_context=storage_context,
service_context=service_context
)
print('PINECONE Vector Index initialized:\n',index.describe_index_stats())
# init the query engine
query_engine = LamaIndex.as_query_engine()
return query_engine, LamaIndex
@contextmanager
def suppress_stdout():
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout | [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((768, 796), 'os.mkdir', 'os.mkdir', (['workspace_dir_name'], {}), '(workspace_dir_name)\n', (776, 796), False, 'import os\n'), ((5950, 5986), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-4"""'], {}), "('gpt-4')\n", (5977, 5986), False, 'import tiktoken\n'), ((7532, 7548), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (7542, 7548), False, 'import os\n'), ((8565, 8589), 'os.listdir', 'os.listdir', (['"""workspaces"""'], {}), "('workspaces')\n", (8575, 8589), False, 'import os\n'), ((9082, 9118), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-4"""'], {}), "('gpt-4')\n", (9109, 9118), False, 'import tiktoken\n'), ((9209, 9234), 'langchain.document_loaders.OnlinePDFLoader', 'OnlinePDFLoader', (['pdf_path'], {}), '(pdf_path)\n', (9224, 9234), False, 'from langchain.document_loaders import OnlinePDFLoader\n'), ((9509, 9524), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (9519, 9524), False, 'import os\n'), ((15782, 15803), 'os.listdir', 'os.listdir', (['folderdir'], {}), '(folderdir)\n', (15792, 15803), False, 'import os\n'), ((16505, 16526), 'os.listdir', 'os.listdir', (['folderdir'], {}), '(folderdir)\n', (16515, 16526), False, 'import os\n'), ((16958, 17060), 'pinecone.init', 'pinecone.init', ([], {'api_key': "os.environ['PINECONE_API_KEY']", 'environment': "os.environ['PINECONE_API_ENV']"}), "(api_key=os.environ['PINECONE_API_KEY'], environment=os.\n environ['PINECONE_API_ENV'])\n", (16971, 17060), False, 'import pinecone\n'), ((17396, 17431), 'pinecone.Index', 'pinecone.Index', (['pinecone_index_name'], {}), '(pinecone_index_name)\n', (17410, 17431), False, 'import pinecone\n'), ((17470, 17511), 'llama_index.vector_stores.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'index'}), '(pinecone_index=index)\n', (17489, 17511), False, 'from llama_index.vector_stores import PineconeVectorStore\n'), ((17516, 17529), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (17526, 17529), False, 'import time\n'), ((17589, 17644), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (17617, 17644), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((17678, 17747), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'embed_batch_size': '(100)'}), "(model='text-embedding-ada-002', embed_batch_size=100)\n", (17693, 17747), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((17770, 17823), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (17798, 17823), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((17878, 17988), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(docs, storage_context=storage_context,\n service_context=service_context)\n', (17912, 17988), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((692, 703), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (701, 703), False, 'import os\n'), ((810, 853), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""'], {}), "(workspace_dir_name, 'results')\n", (822, 853), False, 'import os\n'), ((867, 919), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""refy_suggestions"""'], {}), "(workspace_dir_name, 'refy_suggestions')\n", (879, 919), False, 'import os\n'), ((2613, 2631), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (2627, 2631), False, 'import csv\n'), ((3285, 3303), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (3299, 3303), False, 'import csv\n'), ((3963, 4005), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'save_path'], {}), '(url, save_path)\n', (3989, 4005), False, 'import urllib\n'), ((4283, 4326), 'requests.get', 'requests.get', (['doi_url'], {'allow_redirects': '(True)'}), '(doi_url, allow_redirects=True)\n', (4295, 4326), False, 'import requests\n'), ((4795, 4841), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['pdf_url', 'save_path'], {}), '(pdf_url, save_path)\n', (4821, 4841), False, 'import urllib\n'), ((5368, 5386), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (5382, 5386), False, 'import csv\n'), ((8008, 8028), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (8022, 8028), False, 'import csv\n'), ((8205, 8237), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'title1'], {}), "('[^a-zA-Z]', ' ', title1)\n", (8211, 8237), False, 'import re\n'), ((8256, 8288), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'title2'], {}), "('[^a-zA-Z]', ' ', title2)\n", (8262, 8288), False, 'import re\n'), ((9545, 9568), 'os.path.join', 'os.path.join', (['dir', 'item'], {}), '(dir, item)\n', (9557, 9568), False, 'import os\n'), ((10975, 10995), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (10989, 10995), False, 'import csv\n'), ((17149, 17172), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (17170, 17172), False, 'import pinecone\n'), ((17214, 17302), 'pinecone.create_index', 'pinecone.create_index', ([], {'name': 'pinecone_index_name', 'metric': '"""dotproduct"""', 'dimension': '(1536)'}), "(name=pinecone_index_name, metric='dotproduct',\n dimension=1536)\n", (17235, 17302), False, 'import pinecone\n'), ((1250, 1307), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (1262, 1307), False, 'import os\n'), ((1348, 1405), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (1360, 1405), False, 'import os\n'), ((2268, 2325), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2280, 2325), False, 'import os\n'), ((2366, 2423), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2378, 2423), False, 'import os\n'), ((2509, 2566), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2521, 2566), False, 'import os\n'), ((2778, 2835), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2790, 2835), False, 'import os\n'), ((2844, 2901), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.bib"""'], {}), "(workspace_dir_name, 'results', 'papers.bib')\n", (2856, 2901), False, 'import os\n'), ((3100, 3157), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.bib"""'], {}), "(workspace_dir_name, 'results', 'papers.bib')\n", (3112, 3157), False, 'import os\n'), ((3172, 3236), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""refy_suggestions"""', '"""test.csv"""'], {}), "(workspace_dir_name, 'refy_suggestions', 'test.csv')\n", (3184, 3236), False, 'import os\n'), ((3468, 3536), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""refy_suggestions"""', "(title + '.pdf')"], {}), "(workspace_dir_name, 'refy_suggestions', title + '.pdf')\n", (3480, 3536), False, 'import os\n'), ((5119, 5167), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['final_url', 'save_path'], {}), '(final_url, save_path)\n', (5145, 5167), False, 'import urllib\n'), ((7602, 7626), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (7614, 7626), False, 'import os\n'), ((8802, 8822), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (8816, 8822), False, 'import csv\n'), ((9636, 9659), 'os.path.isdir', 'os.path.isdir', (['itempath'], {}), '(itempath)\n', (9649, 9659), False, 'import os\n'), ((10078, 10098), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (10092, 10098), False, 'import csv\n'), ((10760, 10803), 'csv.DictWriter', 'csv.DictWriter', (['file'], {'fieldnames': 'fieldnames'}), '(file, fieldnames=fieldnames)\n', (10774, 10803), False, 'import csv\n'), ((16201, 16230), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16213, 16230), False, 'import os\n'), ((16702, 16731), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16714, 16731), False, 'import os\n'), ((576, 587), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (585, 587), False, 'import os\n'), ((627, 638), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (636, 638), False, 'import os\n'), ((3718, 3729), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3727, 3729), False, 'import os\n'), ((3806, 3818), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3816, 3818), False, 'import uuid\n'), ((3896, 3922), 'os.path.abspath', 'os.path.abspath', (['save_path'], {}), '(save_path)\n', (3911, 3922), False, 'import os\n'), ((5564, 5608), 'os.path.join', 'os.path.join', (['csv_path', '""".."""', "(title + '.pdf')"], {}), "(csv_path, '..', title + '.pdf')\n", (5576, 5608), False, 'import os\n'), ((7652, 7676), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (7664, 7676), False, 'import os\n'), ((7738, 7762), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (7750, 7762), False, 'import os\n'), ((14294, 14449), 'csv.DictWriter', 'csv.DictWriter', (['fp', "['paperId', 'title', 'first_author', 'year', 'abstract', 'tldr', 'bibtex',\n 'influentialCitationCount', 'venue', 'journal', 'pages']"], {}), "(fp, ['paperId', 'title', 'first_author', 'year', 'abstract',\n 'tldr', 'bibtex', 'influentialCitationCount', 'venue', 'journal', 'pages'])\n", (14308, 14449), False, 'import csv\n'), ((16271, 16300), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16283, 16300), False, 'import os\n'), ((16781, 16810), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16793, 16810), False, 'import os\n'), ((745, 757), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (755, 757), False, 'import uuid\n'), ((15951, 15980), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (15963, 15980), False, 'import os\n'), ((16590, 16619), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16602, 16619), False, 'import os\n'), ((16085, 16097), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (16095, 16097), False, 'import uuid\n')] |
import os
import logging
import sys
from llama_index import GPTSimpleVectorIndex
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# 加载索引
new_index = GPTSimpleVectorIndex.load_from_disk('index.json')
# 查询索引
response = new_index.query("What did the author do in 9th grade?")
# 打印答案
print(response)
| [
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((82, 140), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (101, 140), False, 'import logging\n'), ((234, 283), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['"""index.json"""'], {}), "('index.json')\n", (269, 283), False, 'from llama_index import GPTSimpleVectorIndex\n'), ((172, 212), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (193, 212), False, 'import logging\n'), ((141, 160), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (158, 160), False, 'import logging\n')] |
import os
import openai
from fastapi import FastAPI, HTTPException
from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context
from llama_index.indices.postprocessor import SentenceEmbeddingOptimizer
from llama_index.embeddings import OpenAIEmbedding
from pydantic import BaseModel
openai.api_key = os.environ["OPENAI_API_KEY"]
app = FastAPI()
class QueryRequest(BaseModel):
question: str
class QueryResponse(BaseModel):
answer: str
embed_model = OpenAIEmbedding(embed_batch_size=10)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
set_global_service_context(service_context)
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context)
query_engine = index.as_query_engine(
node_postprocessors=[SentenceEmbeddingOptimizer(percentile_cutoff=0.5)],
response_mode="compact",
similarity_cutoff=0.7
)
@app.get("/")
def read_root():
return {"Hello": "World"}
@app.post("/chat")
def query_data(request: QueryRequest):
response = query_engine.query(request.question)
if not response:
raise HTTPException(status_code=404, detail="No results found")
return QueryResponse(answer=str(response))
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.indices.postprocessor.SentenceEmbeddingOptimizer",
"llama_index.set_global_service_context",
"llama_index.embeddings.OpenAIEmbedding"
] | [((385, 394), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (392, 394), False, 'from fastapi import FastAPI, HTTPException\n'), ((510, 546), 'llama_index.embeddings.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'embed_batch_size': '(10)'}), '(embed_batch_size=10)\n', (525, 546), False, 'from llama_index.embeddings import OpenAIEmbedding\n'), ((565, 618), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (593, 618), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context\n'), ((619, 662), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (645, 662), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context\n'), ((682, 735), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (710, 735), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context\n'), ((744, 784), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (767, 784), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context\n'), ((1169, 1226), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""No results found"""'}), "(status_code=404, detail='No results found')\n", (1182, 1226), False, 'from fastapi import FastAPI, HTTPException\n'), ((848, 897), 'llama_index.indices.postprocessor.SentenceEmbeddingOptimizer', 'SentenceEmbeddingOptimizer', ([], {'percentile_cutoff': '(0.5)'}), '(percentile_cutoff=0.5)\n', (874, 897), False, 'from llama_index.indices.postprocessor import SentenceEmbeddingOptimizer\n')] |
"""Example of how to use llamaindex for semantic search.
This example assumes that initially there is a projects.DATASETS_DIR_PATH/embeddings.pkl file
that has a list of dictionaries with each dictionary containing "text",
"rule_name" and "section_label" fields.
The first time you run this script, a vector store will be creaed with
embeddings. This store will be saved to "cache/msrb_index_store".
Subsequent runs will load the vector store from this location.
Each time you run this script you enter a loop where you can ask as
many questions of the data as you'd like. Each time you ask a question
you will be given a response that tells you:
1. The rule names and section labels for the most relevant rules,
2. A brief preview of the text from those sections, and
3. An LLM-generated response to your query given the texts that it found.
You can tweak three parameters at the bottom of this script (after all of
the function definitions):
- model_name: which OpenAI model to use.
- top_k: how many rules to return.
- similarity_cutoff: threshold for relevance (between 0 and 1).
"""
import os
import pickle
from pathlib import Path
# from llama_index import SimpleDirectoryReader
# from llama_index.node_parser import SimpleNodeParser
from llama_index import (
VectorStoreIndex,
StorageContext,
LLMPredictor,
ServiceContext,
get_response_synthesizer,
load_index_from_storage,
)
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.indices.postprocessor import SimilarityPostprocessor
from llama_index.schema import TextNode
from langchain import OpenAI
from examples import project
TEXT_DATA_FILE = Path(os.path.join(project.DATASETS_DIR_PATH, 'embeddings.pkl'))
INDEX_DATA_DIR = Path('cache/msrb_index_store')
def get_vector_store(service_context: ServiceContext) -> VectorStoreIndex:
"""Load a vector index from disk or, if it doesn't exist, create one from raw text data."""
# === Load the data ===========================================================
# Simple example of reading text files from a local directory
# reader = SimpleDirectoryReader('./data')
# documents = reader.load_data() # returns a list of Documents
# parser = SimpleNodeParser()
# nodes = parser.get_nodes_from_documents(documents) # returns a list of nodes
if INDEX_DATA_DIR.exists():
print('Loading vector store from local directory.')
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir=INDEX_DATA_DIR)
# load index
index = load_index_from_storage(storage_context)
else:
print('No local index found.')
print('Loading data.')
with open('embeddings.pkl', 'rb') as f:
data = pickle.load(f)
print('Building nodes.')
nodes = []
for example in data:
node = TextNode(text=example['text'])
node.metadata['rule_name'] = example['rule_name']
node.metadata['section_label'] = example['section_label']
nodes.append(node)
print(f'Created {len(nodes)} nodes.')
print('Creating vector store.')
index = VectorStoreIndex(nodes, service_context=service_context)
# index = VectorStoreIndex.from_documents(documents)
print('Saving vector store.')
index.storage_context.persist(INDEX_DATA_DIR)
return index
def get_llm_backend(model_name: str) -> ServiceContext:
"""Get an LLM to provide embedding and text generation service."""
# === Define the LLM backend ==================================================
# define LLM
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name=model_name))
# configure service context
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
return service_context
def get_query_engine(index: VectorStoreIndex, response_mode: str, top_k: int, similarity_cutoff: float) -> RetrieverQueryEngine:
"""Build a query enginge by combining a retriever and response synthesizer."""
# configure retriever
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=top_k,
)
# configure response synthesizer
response_synthesizer = get_response_synthesizer()
# assemble query engine
# query_engine = RetrieverQueryEngine.from_args(
# retriever=retriever,
# response_synthesizer=response_synthesizer,
# response_mode=response_mode
# )
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
node_postprocessors=[
SimilarityPostprocessor(similarity_cutoff=similarity_cutoff)
]
)
return query_engine
if __name__=='__main__':
model_name = "text-davinci-003"
top_k = 3
similarity_cutoff = 0.7
service_context = get_llm_backend(model_name)
index = get_vector_store(service_context)
response_mode = 'refine' # response_mode = 'no_text' for no text generation
query_engine = get_query_engine(index, response_mode, top_k, similarity_cutoff)
# query
while (query := input('Ask me a question about the MSRB rule book ("quit" to quit): ')) != 'quit':
print(f'You asked: {query}')
response = query_engine.query(query)
print('Source nodes:')
print(f'There are {len(response.source_nodes)} source nodes from the following rules:')
for source_node in response.source_nodes:
print(source_node.node.metadata['rule_name'], source_node.node.metadata['section_label'])
print(response.get_formatted_sources())
print('Response:')
print(response)
print()
print('='*40)
| [
"llama_index.get_response_synthesizer",
"llama_index.schema.TextNode",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.load_index_from_storage",
"llama_index.indices.postprocessor.SimilarityPostprocessor"
] | [((1802, 1832), 'pathlib.Path', 'Path', (['"""cache/msrb_index_store"""'], {}), "('cache/msrb_index_store')\n", (1806, 1832), False, 'from pathlib import Path\n'), ((1726, 1783), 'os.path.join', 'os.path.join', (['project.DATASETS_DIR_PATH', '"""embeddings.pkl"""'], {}), "(project.DATASETS_DIR_PATH, 'embeddings.pkl')\n", (1738, 1783), False, 'import os\n'), ((3834, 3891), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (3862, 3891), False, 'from llama_index import VectorStoreIndex, StorageContext, LLMPredictor, ServiceContext, get_response_synthesizer, load_index_from_storage\n'), ((4176, 4233), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': 'top_k'}), '(index=index, similarity_top_k=top_k)\n', (4196, 4233), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((4322, 4348), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {}), '()\n', (4346, 4348), False, 'from llama_index import VectorStoreIndex, StorageContext, LLMPredictor, ServiceContext, get_response_synthesizer, load_index_from_storage\n'), ((2542, 2598), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'INDEX_DATA_DIR'}), '(persist_dir=INDEX_DATA_DIR)\n', (2570, 2598), False, 'from llama_index import VectorStoreIndex, StorageContext, LLMPredictor, ServiceContext, get_response_synthesizer, load_index_from_storage\n'), ((2636, 2676), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2659, 2676), False, 'from llama_index import VectorStoreIndex, StorageContext, LLMPredictor, ServiceContext, get_response_synthesizer, load_index_from_storage\n'), ((3237, 3293), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'service_context': 'service_context'}), '(nodes, service_context=service_context)\n', (3253, 3293), False, 'from llama_index import VectorStoreIndex, StorageContext, LLMPredictor, ServiceContext, get_response_synthesizer, load_index_from_storage\n'), ((2824, 2838), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2835, 2838), False, 'import pickle\n'), ((2940, 2970), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': "example['text']"}), "(text=example['text'])\n", (2948, 2970), False, 'from llama_index.schema import TextNode\n'), ((3733, 3777), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': 'model_name'}), '(temperature=0, model_name=model_name)\n', (3739, 3777), False, 'from langchain import OpenAI\n'), ((4724, 4784), 'llama_index.indices.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': 'similarity_cutoff'}), '(similarity_cutoff=similarity_cutoff)\n', (4747, 4784), False, 'from llama_index.indices.postprocessor import SimilarityPostprocessor\n')] |
from dotenv import load_dotenv
load_dotenv()
from llama_index import GPTVectorStoreIndex, TrafilaturaWebReader
import chromadb
def create_embedding_store(name):
chroma_client = chromadb.Client()
return chroma_client.create_collection(name)
def query_pages(collection, urls, questions):
docs = TrafilaturaWebReader().load_data(urls)
index = GPTVectorStoreIndex.from_documents(docs, chroma_collection=collection)
query_engine = index.as_query_engine()
for question in questions:
print(f"Question: {question} \n")
print(f"Answer: {query_engine.query(question)}")
if __name__ == "__main__":
url_list = ["https://supertype.ai", "https://supertype.ai/about-us"]
questions = [
"Who are the members of Supertype.ai",
"What problems are they trying to solve?",
"What are the important values at the company?"
]
collection = create_embedding_store("supertype")
query_pages(
collection,
url_list,
questions
)
| [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.TrafilaturaWebReader"
] | [((32, 45), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (43, 45), False, 'from dotenv import load_dotenv\n'), ((185, 202), 'chromadb.Client', 'chromadb.Client', ([], {}), '()\n', (200, 202), False, 'import chromadb\n'), ((361, 431), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['docs'], {'chroma_collection': 'collection'}), '(docs, chroma_collection=collection)\n', (395, 431), False, 'from llama_index import GPTVectorStoreIndex, TrafilaturaWebReader\n'), ((310, 332), 'llama_index.TrafilaturaWebReader', 'TrafilaturaWebReader', ([], {}), '()\n', (330, 332), False, 'from llama_index import GPTVectorStoreIndex, TrafilaturaWebReader\n')] |
import logging
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
import requests
from typing import List
import re
import os
import logging
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
import requests
from typing import List
import os
import pandas as pd
import openai
import ast
TWITTER_USERNAME = "shauryr"
def generate_search_queries_prompt(question):
"""Generates the search queries prompt for the given question.
Args: question (str): The question to generate the search queries prompt for
Returns: str: The search queries prompt for the given question
"""
return (
f'Please generate four related search queries that align with the initial query: "{question}"'
f'Each variation should be presented as a list of strings, following this format: ["query 1", "query 2", "query 3", "query 4"]'
)
def get_related_questions(query):
research_template = """You are a search engine expert"""
messages = [{
"role": "system",
"content": research_template
}, {
"role": "user",
"content": generate_search_queries_prompt(query),
}]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.5,
max_tokens=256
)
related_questions = get_questions(response.choices[0].message.content)
related_questions.append(query)
return related_questions
def get_questions(response_text):
data = response_text.split("\n")
data = [ast.literal_eval(item)[0] for item in data]
return data
def get_unique_docs(docs):
unique_docs_id = []
unique_docs = []
for doc in docs:
if doc.extra_info['paperId'] not in unique_docs:
unique_docs_id.append(doc.extra_info['paperId'])
unique_docs.append(doc)
return unique_docs
class SemanticScholarReader(BaseReader):
"""
A class to read and process data from Semantic Scholar API
...
Methods
-------
__init__():
Instantiate the SemanticScholar object
load_data(query: str, limit: int = 10, returned_fields: list = ["title", "abstract", "venue", "year", "paperId", "citationCount", "openAccessPdf", "authors"]) -> list:
Loads data from Semantic Scholar based on the query and returned_fields
"""
def __init__(self, timeout=10, api_key=None, base_dir="pdfs"):
"""
Instantiate the SemanticScholar object
"""
from semanticscholar import SemanticScholar
import arxiv
self.arxiv = arxiv
self.base_dir = base_dir
self.s2 = SemanticScholar(timeout=timeout)
# check for base dir
if not os.path.exists(self.base_dir):
os.makedirs(self.base_dir)
def _clear_cache(self):
"""
delete the .citation* folder
"""
import shutil
shutil.rmtree("./.citation*")
def _download_pdf(self, paper_id, url: str, base_dir="pdfs"):
logger = logging.getLogger()
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
}
# Making a GET request
response = requests.get(url, headers=headers, stream=True)
content_type = response.headers["Content-Type"]
# As long as the content-type is application/pdf, this will download the file
if "application/pdf" in content_type:
os.makedirs(base_dir, exist_ok=True)
file_path = os.path.join(base_dir, f"{paper_id}.pdf")
# check if the file already exists
if os.path.exists(file_path):
logger.info(f"{file_path} already exists")
return file_path
with open(file_path, "wb") as file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
file.write(chunk)
logger.info(f"Downloaded pdf from {url}")
return file_path
else:
logger.warning(f"{url} was not downloaded: protected")
return None
def _get_full_text_docs(self, documents: List[Document]) -> List[Document]:
from PyPDF2 import PdfReader
"""
Gets the full text of the documents from Semantic Scholar
Parameters
----------
documents: list
The list of Document object that contains the search results
Returns
-------
list
The list of Document object that contains the search results with full text
Raises
------
Exception
If there is an error while getting the full text
"""
full_text_docs = []
for paper in documents:
metadata = paper.extra_info
url = metadata["openAccessPdf"]
externalIds = metadata["externalIds"]
paper_id = metadata["paperId"]
file_path = None
persist_dir = os.path.join(self.base_dir, f"{paper_id}.pdf")
if url and not os.path.exists(persist_dir):
# Download the document first
file_path = self._download_pdf(metadata["paperId"], url, persist_dir)
if (
not url
and externalIds
and "ArXiv" in externalIds
and not os.path.exists(persist_dir)
):
# download the pdf from arxiv
file_path = self._download_pdf_from_arxiv(
paper_id, externalIds["ArXiv"]
)
# Then, check if it's a valid PDF. If it's not, skip to the next document.
if file_path:
try:
pdf = PdfReader(open(file_path, "rb"))
except Exception as e:
logging.error(
f"Failed to read pdf with exception: {e}. Skipping document..."
)
continue
text = ""
for page in pdf.pages:
text += page.extract_text()
full_text_docs.append(Document(text=text, extra_info=metadata))
return full_text_docs
def _download_pdf_from_arxiv(self, paper_id, arxiv_id):
paper = next(self.arxiv.Search(id_list=[arxiv_id], max_results=1).results())
paper.download_pdf(dirpath=self.base_dir, filename=paper_id + ".pdf")
return os.path.join(self.base_dir, f"{paper_id}.pdf")
def load_data(
self,
query,
limit,
full_text=False,
returned_fields=[
"title",
"abstract",
"venue",
"year",
"paperId",
"citationCount",
"openAccessPdf",
"authors",
"externalIds",
],
) -> List[Document]:
"""
Loads data from Semantic Scholar based on the entered query and returned_fields
Parameters
----------
query: str
The search query for the paper
limit: int, optional
The number of maximum results returned (default is 10)
returned_fields: list, optional
The list of fields to be returned from the search
Returns
-------
list
The list of Document object that contains the search results
Raises
------
Exception
If there is an error while performing the search
"""
results = []
# query = get_related_questions(query)
query = [query]
try:
for question in query:
logging.info(f"Searching for {question}")
_results = self.s2.search_paper(question, limit=limit, fields=returned_fields)
results.extend(_results[:limit])
except (requests.HTTPError, requests.ConnectionError, requests.Timeout) as e:
logging.error(
"Failed to fetch data from Semantic Scholar with exception: %s", e
)
raise
except Exception as e:
logging.error("An unexpected error occurred: %s", e)
raise
documents = []
for item in results[:limit*len(query)]:
openAccessPdf = getattr(item, "openAccessPdf", None)
abstract = getattr(item, "abstract", None)
title = getattr(item, "title", None)
text = None
# concat title and abstract
if abstract and title:
text = title + " " + abstract
elif not abstract:
text = title
metadata = {
"title": title,
"venue": getattr(item, "venue", None),
"year": getattr(item, "year", None),
"paperId": getattr(item, "paperId", None),
"citationCount": getattr(item, "citationCount", None),
"openAccessPdf": openAccessPdf.get("url") if openAccessPdf else None,
"authors": [author["name"] for author in getattr(item, "authors", [])],
"externalIds": getattr(item, "externalIds", None),
}
documents.append(Document(text=text, extra_info=metadata))
if full_text:
logging.info("Getting full text documents...")
full_text_documents = self._get_full_text_docs(documents)
documents.extend(full_text_documents)
documents = get_unique_docs(documents)
return documents
def get_twitter_badge():
"""Constructs the Markdown code for the Twitter badge."""
return f'<a href="https://twitter.com/{TWITTER_USERNAME}" target="_blank"><img src="https://img.shields.io/badge/Twitter-1DA1F2?style=for-the-badge&logo=twitter&logoColor=white" /></a>'
def get_link_tree_badge():
return f'<a href="https://linktr.ee/shauryr" target="_blank"><img src="https://img.shields.io/badge/Linktree-39E09B?style=for-the-badge&logo=linktree&logoColor=white" /></a>'
def get_github_badge():
return f'<a href="https://github.com/shauryr/s2qa" target="_blank"><img src="https://img.shields.io/badge/GitHub-100000?style=for-the-badge&logo=github&logoColor=white" /></a>'
def display_questions(sample_questions):
s = "#### 🧐 More questions? \n"
for i in sample_questions:
s += "- " + i + "\n"
return s
def get_citation(metadata):
# Extract details from metadata
title = metadata.get("title", "No Title")
venue = metadata.get("venue", "No Venue")
year = metadata.get("year", "No Year")
authors = metadata.get("authors", [])
# Generate author names in correct format
author_names = []
for author in authors[:5]:
last_name, *first_names = author.split(" ")
first_initials = " ".join(name[0] + "." for name in first_names)
author_names.append(f"{last_name}, {first_initials}")
authors_string = ", & ".join(author_names)
# APA citation format: Author1, Author2, & Author3. (Year). Title. Venue.
citation = f"{authors_string}. ({year}). **{title}**. {venue}."
return citation
def extract_numbers_in_brackets(input_string):
# use regular expressions to find all occurrences of [number]
# numbers_in_brackets = re.findall(r"\[(\d+)\]", input_string)
numbers_in_brackets = re.findall(r"\[(.*?)\]", input_string)
# numbers_in_brackets = [int(i) for num in numbers_in_brackets for i in num.split(",")]
# convert all numbers to int and remove duplicates by converting list to set and then back to list
cleaned_numbers = []
for n in numbers_in_brackets:
# Try to convert the value to an integer
try:
cleaned_numbers.append(int(n))
# If it fails (throws a ValueError), just ignore and continue with the next value
except ValueError:
continue
# Apply the rest of your code on the cleaned list
return sorted(list(set(cleaned_numbers)))
def generate_used_reference_display(source_nodes, used_nodes):
reference_display = "\n #### 📚 References: \n"
# for index in used_nodes get the source node and add it to the reference display
for index in used_nodes:
try:
source_node = source_nodes[index - 1]
except IndexError:
return "\n #### 😞 Couldnt Parse References \n"
metadata = source_node.node.metadata
reference_display += (
"[["
+ str(source_nodes.index(source_node) + 1)
+ "]"
+ "("
+ "https://www.semanticscholar.org/paper/"
+ metadata["paperId"]
+ ")] "
+ "\n `. . ."
+ str(source_node.node.text)[100:290]
+ ". . .`"
+ get_citation(metadata)
+ " \n\n"
)
return reference_display
def documents_to_df(documents):
# convert document objects to dataframe
list_data = []
for i, doc in enumerate(documents):
list_data.append(doc.extra_info.copy())
df = pd.DataFrame(list_data)
return df
def generate_reference_display(source_nodes):
reference_display = "\n ### References: \n"
for source_node in source_nodes:
metadata = source_node.node.metadata
# add number infront of citation to make it easier to reference
# reference_display += (
# "[["
# + str(source_nodes.index(source_node) + 1)
# + "]"
# + "("
# + "https://www.semanticscholar.org/paper/"
# + metadata["paperId"]
# + ")] "
# + '\n "`. . .'
# + str(source_node.node.text)[100:290]
# + ". . .` - **"
# + get_citation(metadata)
# + "** \n\n"
# )
reference_display += (
"[["
+ str(source_nodes.index(source_node) + 1)
+ "]"
+ "("
+ "https://www.semanticscholar.org/paper/"
+ metadata["paperId"]
+ ")] "
+ get_citation(metadata)
+ " \n\n"
)
return reference_display
| [
"llama_index.readers.schema.base.Document"
] | [((1299, 1406), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo"""', 'messages': 'messages', 'temperature': '(0.5)', 'max_tokens': '(256)'}), "(model='gpt-3.5-turbo', messages=messages,\n temperature=0.5, max_tokens=256)\n", (1327, 1406), False, 'import openai\n'), ((11530, 11569), 're.findall', 're.findall', (['"""\\\\[(.*?)\\\\]"""', 'input_string'], {}), "('\\\\[(.*?)\\\\]', input_string)\n", (11540, 11569), False, 'import re\n'), ((13232, 13255), 'pandas.DataFrame', 'pd.DataFrame', (['list_data'], {}), '(list_data)\n', (13244, 13255), True, 'import pandas as pd\n'), ((2741, 2773), 'semanticscholar.SemanticScholar', 'SemanticScholar', ([], {'timeout': 'timeout'}), '(timeout=timeout)\n', (2756, 2773), False, 'from semanticscholar import SemanticScholar\n'), ((3009, 3038), 'shutil.rmtree', 'shutil.rmtree', (['"""./.citation*"""'], {}), "('./.citation*')\n", (3022, 3038), False, 'import shutil\n'), ((3123, 3142), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3140, 3142), False, 'import logging\n'), ((3366, 3413), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'stream': '(True)'}), '(url, headers=headers, stream=True)\n', (3378, 3413), False, 'import requests\n'), ((6622, 6668), 'os.path.join', 'os.path.join', (['self.base_dir', 'f"""{paper_id}.pdf"""'], {}), "(self.base_dir, f'{paper_id}.pdf')\n", (6634, 6668), False, 'import os\n'), ((1649, 1671), 'ast.literal_eval', 'ast.literal_eval', (['item'], {}), '(item)\n', (1665, 1671), False, 'import ast\n'), ((2818, 2847), 'os.path.exists', 'os.path.exists', (['self.base_dir'], {}), '(self.base_dir)\n', (2832, 2847), False, 'import os\n'), ((2861, 2887), 'os.makedirs', 'os.makedirs', (['self.base_dir'], {}), '(self.base_dir)\n', (2872, 2887), False, 'import os\n'), ((3615, 3651), 'os.makedirs', 'os.makedirs', (['base_dir'], {'exist_ok': '(True)'}), '(base_dir, exist_ok=True)\n', (3626, 3651), False, 'import os\n'), ((3676, 3717), 'os.path.join', 'os.path.join', (['base_dir', 'f"""{paper_id}.pdf"""'], {}), "(base_dir, f'{paper_id}.pdf')\n", (3688, 3717), False, 'import os\n'), ((3780, 3805), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (3794, 3805), False, 'import os\n'), ((5158, 5204), 'os.path.join', 'os.path.join', (['self.base_dir', 'f"""{paper_id}.pdf"""'], {}), "(self.base_dir, f'{paper_id}.pdf')\n", (5170, 5204), False, 'import os\n'), ((9474, 9520), 'logging.info', 'logging.info', (['"""Getting full text documents..."""'], {}), "('Getting full text documents...')\n", (9486, 9520), False, 'import logging\n'), ((7834, 7875), 'logging.info', 'logging.info', (['f"""Searching for {question}"""'], {}), "(f'Searching for {question}')\n", (7846, 7875), False, 'import logging\n'), ((8118, 8203), 'logging.error', 'logging.error', (['"""Failed to fetch data from Semantic Scholar with exception: %s"""', 'e'], {}), "('Failed to fetch data from Semantic Scholar with exception: %s',\n e)\n", (8131, 8203), False, 'import logging\n'), ((8291, 8343), 'logging.error', 'logging.error', (['"""An unexpected error occurred: %s"""', 'e'], {}), "('An unexpected error occurred: %s', e)\n", (8304, 8343), False, 'import logging\n'), ((9397, 9437), 'llama_index.readers.schema.base.Document', 'Document', ([], {'text': 'text', 'extra_info': 'metadata'}), '(text=text, extra_info=metadata)\n', (9405, 9437), False, 'from llama_index.readers.schema.base import Document\n'), ((5232, 5259), 'os.path.exists', 'os.path.exists', (['persist_dir'], {}), '(persist_dir)\n', (5246, 5259), False, 'import os\n'), ((5534, 5561), 'os.path.exists', 'os.path.exists', (['persist_dir'], {}), '(persist_dir)\n', (5548, 5561), False, 'import os\n'), ((6310, 6350), 'llama_index.readers.schema.base.Document', 'Document', ([], {'text': 'text', 'extra_info': 'metadata'}), '(text=text, extra_info=metadata)\n', (6318, 6350), False, 'from llama_index.readers.schema.base import Document\n'), ((6004, 6082), 'logging.error', 'logging.error', (['f"""Failed to read pdf with exception: {e}. Skipping document..."""'], {}), "(f'Failed to read pdf with exception: {e}. Skipping document...')\n", (6017, 6082), False, 'import logging\n')] |
"""Simple horoscope predictions generator."""
from typing import List, Optional, Dict, Callable
import re
import json
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from vedastro import *
class SimpleBirthTimeReader(BasePydanticReader):
"""Simple birth time prediction reader.
Reads horoscope predictions from vedastro.org
`pip install vedastro` needed
Args:
metadata_fn (Optional[Callable[[str], Dict]]): A function that takes in
a birth time and returns a dictionary of prediction metadata.
Default is None.
"""
is_remote: bool = True
_metadata_fn: Optional[Callable[[str], Dict]] = PrivateAttr()
def __init__(
self,
metadata_fn: Optional[Callable[[str], Dict]] = None,
) -> None:
"""Initialize with parameters."""
self._metadata_fn = metadata_fn
super().__init__()
@classmethod
def class_name(cls) -> str:
return "SimpleBirthTimeReader"
def load_data(self, birth_time: str) -> List[Document]:
"""Load data from the given birth time.
Args:
birth_time (str): birth time in this format : Location/Delhi,India/Time/01:30/14/02/2024/+05:30
Returns:
List[Document]: List of documents.
"""
documents = SimpleBirthTimeReader.birth_time_to_llama_index_nodes(birth_time)
return documents
@staticmethod
# converts vedastro horoscope predictions (JSON) to_llama-index's NodeWithScore
# so that llama index can understand vedastro predictions
def vedastro_predictions_to_llama_index_weight_nodes(
birth_time, predictions_list_json
):
from llama_index.core.schema import NodeWithScore
from llama_index.core.schema import TextNode
# Initialize an empty list
prediction_nodes = []
for prediction in predictions_list_json:
related_bod_json = prediction["RelatedBody"]
# shadbala_score = Calculate.PlanetCombinedShadbala()
rel_planets = related_bod_json["Planets"]
parsed_list = []
for planet in rel_planets:
parsed_list.append(PlanetName.Parse(planet))
# TODO temp use 1st planet, house, zodiac
planet_tags = []
shadbala_score = 0
if parsed_list: # This checks if the list is not empty
for planet in parsed_list:
shadbala_score += Calculate.PlanetShadbalaPinda(
planet, birth_time
).ToDouble()
# planet_tags = Calculate.GetPlanetTags(parsed_list[0])
predict_node = TextNode(
text=prediction["Description"],
metadata={
"name": SimpleBirthTimeReader.split_camel_case(prediction["Name"])
# "related_body": prediction['RelatedBody'],
# "planet_tags": planet_tags,
},
metadata_seperator="::",
metadata_template="{key}=>{value}",
text_template="Metadata: {metadata_str}\n-----\nContent: {content}",
)
# add in shadbala to give each prediction weights
parsed_node = NodeWithScore(
node=predict_node, score=shadbala_score
) # add in shabala score
prediction_nodes.append(parsed_node) # add to main list
return prediction_nodes
@staticmethod
def birth_time_to_llama_index_nodes(birth_time_text):
# 1 : convert raw time text into parsed time (aka time url)
parsed_birth_time = Time.FromUrl(birth_time_text).GetAwaiter().GetResult()
# 2 : do +300 horoscope prediction calculations to find correct predictions for person
all_predictions_raw = Calculate.HoroscopePredictions(parsed_birth_time)
# show the number of horo records found
print(f"Predictions Found : {len(all_predictions_raw)}")
# format list nicely so LLM can swallow (llama_index nodes)
# so that llama index can understand vedastro predictions
all_predictions_json = json.loads(
HoroscopePrediction.ToJsonList(all_predictions_raw).ToString()
)
# do final packing into llama-index formats
prediction_nodes = (
SimpleBirthTimeReader.vedastro_predictions_to_llama_index_documents(
all_predictions_json
)
)
return prediction_nodes
@staticmethod
def vedastro_predictions_to_llama_index_nodes(birth_time, predictions_list_json):
from llama_index.core.schema import NodeWithScore
from llama_index.core.schema import TextNode
# Initialize an empty list
prediction_nodes = []
for prediction in predictions_list_json:
related_bod_json = prediction["RelatedBody"]
# shadbala_score = Calculate.PlanetCombinedShadbala()
rel_planets = related_bod_json["Planets"]
parsed_list = []
for planet in rel_planets:
parsed_list.append(PlanetName.Parse(planet))
# TODO temp use 1st planet, house, zodiac
planet_tags = []
shadbala_score = 0
if parsed_list: # This checks if the list is not empty
shadbala_score = Calculate.PlanetShadbalaPinda(
parsed_list[0], birth_time
).ToDouble()
planet_tags = Calculate.GetPlanetTags(parsed_list[0])
predict_node = TextNode(
text=prediction["Description"],
metadata={
"name": ChatTools.split_camel_case(prediction["Name"]),
"related_body": prediction["RelatedBody"],
"planet_tags": planet_tags,
},
metadata_seperator="::",
metadata_template="{key}=>{value}",
text_template="Metadata: {metadata_str}\n-----\nContent: {content}",
)
# add in shadbala to give each prediction weights
prediction_nodes.append(predict_node) # add to main list
return prediction_nodes
@staticmethod
# given list vedastro lib horoscope predictions will convert to documents
def vedastro_predictions_to_llama_index_documents(predictions_list_json):
from llama_index.core import Document
from llama_index.core.schema import MetadataMode
import copy
# Initialize an empty list
prediction_nodes = []
for prediction in predictions_list_json:
# take out description (long text) from metadata, becasue already in as "content"
predict_meta = copy.deepcopy(prediction)
del predict_meta["Description"]
predict_node = Document(
text=prediction["Description"],
metadata=predict_meta,
metadata_seperator="::",
metadata_template="{key}=>{value}",
text_template="Metadata: {metadata_str}\n-----\nContent: {content}",
)
# # this is shows difference for understanding output of Documents
# print("#######################################################")
# print(
# "The LLM sees this: \n",
# predict_node.get_content(metadata_mode=MetadataMode.LLM),
# )
# print(
# "The Embedding model sees this: \n",
# predict_node.get_content(metadata_mode=MetadataMode.EMBED),
# )
# print("#######################################################")
# add in shadbala to give each prediction weights
prediction_nodes.append(predict_node) # add to main list
return prediction_nodes
@staticmethod
def split_camel_case(s):
return re.sub("((?<=[a-z])[A-Z]|(?<!\\A)[A-Z](?=[a-z]))", " \\1", s) | [
"llama_index.core.bridge.pydantic.PrivateAttr",
"llama_index.core.Document",
"llama_index.core.schema.NodeWithScore"
] | [((767, 780), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (778, 780), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((8054, 8115), 're.sub', 're.sub', (['"""((?<=[a-z])[A-Z]|(?<!\\\\A)[A-Z](?=[a-z]))"""', '""" \\\\1"""', 's'], {}), "('((?<=[a-z])[A-Z]|(?<!\\\\A)[A-Z](?=[a-z]))', ' \\\\1', s)\n", (8060, 8115), False, 'import re\n'), ((3380, 3434), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'predict_node', 'score': 'shadbala_score'}), '(node=predict_node, score=shadbala_score)\n', (3393, 3434), False, 'from llama_index.core.schema import NodeWithScore\n'), ((6874, 6899), 'copy.deepcopy', 'copy.deepcopy', (['prediction'], {}), '(prediction)\n', (6887, 6899), False, 'import copy\n'), ((6972, 7175), 'llama_index.core.Document', 'Document', ([], {'text': "prediction['Description']", 'metadata': 'predict_meta', 'metadata_seperator': '"""::"""', 'metadata_template': '"""{key}=>{value}"""', 'text_template': '"""Metadata: {metadata_str}\n-----\nContent: {content}"""'}), '(text=prediction[\'Description\'], metadata=predict_meta,\n metadata_seperator=\'::\', metadata_template=\'{key}=>{value}\',\n text_template="""Metadata: {metadata_str}\n-----\nContent: {content}""")\n', (6980, 7175), False, 'from llama_index.core import Document\n')] |
from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType
from llama_index import ListIndex, ServiceContext, SimpleDirectoryReader, VectorStoreIndex
'''
Title of the page: A simple Python implementation of the ReAct pattern for LLMs
Name of the website: LlamaIndex (GPT Index) is a data framework for your LLM application.
URL: https://github.com/jerryjliu/llama_index
'''
docs = SimpleDirectoryReader("../data/paul_graham/").load_data()
from llama_index import ServiceContext, LLMPredictor, TreeIndex
from langchain.chat_models import ChatOpenAI
llm_predictor = LLMPredictor(llm=ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0))
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
callback_manager = CallbackManager([llama_debug])
service_context = ServiceContext.from_defaults(callback_manager=callback_manager, llm_predictor=llm_predictor)
index = VectorStoreIndex.from_documents(docs, service_context=service_context)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
# Print info on the LLM calls during the list index query
print(llama_debug.get_event_time_info(CBEventType.LLM))
# Print info on llm inputs/outputs - returns start/end events for each LLM call
event_pairs = llama_debug.get_llm_inputs_outputs()
print(event_pairs[0][0])
print(event_pairs[0][1].payload.keys())
print(event_pairs[0][1].payload['response'])
# Get info on any event type
event_pairs = llama_debug.get_event_pairs(CBEventType.CHUNKING)
print(event_pairs[0][0].payload.keys()) # get first chunking start event
print(event_pairs[0][1].payload.keys()) # get first chunking end event
# Clear the currently cached events
llama_debug.flush_event_logs()
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.callbacks.CallbackManager"
] | [((676, 718), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (693, 718), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType\n'), ((738, 768), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[llama_debug]'], {}), '([llama_debug])\n', (753, 768), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType\n'), ((787, 883), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'callback_manager': 'callback_manager', 'llm_predictor': 'llm_predictor'}), '(callback_manager=callback_manager,\n llm_predictor=llm_predictor)\n', (815, 883), False, 'from llama_index import ServiceContext, LLMPredictor, TreeIndex\n'), ((889, 959), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context'}), '(docs, service_context=service_context)\n', (920, 959), False, 'from llama_index import ListIndex, ServiceContext, SimpleDirectoryReader, VectorStoreIndex\n'), ((405, 450), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""../data/paul_graham/"""'], {}), "('../data/paul_graham/')\n", (426, 450), False, 'from llama_index import ListIndex, ServiceContext, SimpleDirectoryReader, VectorStoreIndex\n'), ((606, 659), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (616, 659), False, 'from langchain.chat_models import ChatOpenAI\n')] |
import logging
import os
from llama_index import (
StorageContext,
load_index_from_storage,
)
from app.engine.constants import STORAGE_DIR
from app.engine.context import create_service_context
def get_chat_engine():
service_context = create_service_context()
# check if storage already exists
if not os.path.exists(STORAGE_DIR):
raise Exception(
"StorageContext is empty - call 'python app/engine/generate.py' to generate the storage first"
)
logger = logging.getLogger("uvicorn")
# load the existing index
logger.info(f"Loading index from {STORAGE_DIR}...")
storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
index = load_index_from_storage(storage_context, service_context=service_context)
logger.info(f"Finished loading index from {STORAGE_DIR}")
return index.as_chat_engine()
| [
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage"
] | [((249, 273), 'app.engine.context.create_service_context', 'create_service_context', ([], {}), '()\n', (271, 273), False, 'from app.engine.context import create_service_context\n'), ((507, 535), 'logging.getLogger', 'logging.getLogger', (['"""uvicorn"""'], {}), "('uvicorn')\n", (524, 535), False, 'import logging\n'), ((644, 697), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'STORAGE_DIR'}), '(persist_dir=STORAGE_DIR)\n', (672, 697), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((710, 783), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (733, 783), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((323, 350), 'os.path.exists', 'os.path.exists', (['STORAGE_DIR'], {}), '(STORAGE_DIR)\n', (337, 350), False, 'import os\n')] |
"""Module for loading index."""
import logging
from typing import TYPE_CHECKING, Any, Optional
from llama_index import ServiceContext, StorageContext, load_index_from_storage
from llama_index.indices.base import BaseIndex
from ols.app.models.config import ReferenceContent
# This is to avoid importing HuggingFaceBgeEmbeddings in all cases, because in
# runtime it is used only under some conditions. OTOH we need to make Python
# interpreter happy in all circumstances, hence the definiton of Any symbol.
if TYPE_CHECKING:
from langchain_community.embeddings import HuggingFaceBgeEmbeddings # TCH004
else:
HuggingFaceBgeEmbeddings = Any
logger = logging.getLogger(__name__)
class IndexLoader:
"""Load index from local file storage."""
def __init__(self, index_config: Optional[ReferenceContent]) -> None:
"""Initialize loader."""
self._index: Optional[BaseIndex] = None
self._index_config = index_config
logger.debug(f"Config used for index load: {self._index_config}")
if self._index_config is None:
logger.warning("Config for reference content is not set.")
else:
self._index_path = self._index_config.product_docs_index_path
self._index_id = self._index_config.product_docs_index_id
self._embed_model_path = self._index_config.embeddings_model_path
self._embed_model = self._get_embed_model()
self._load_index()
def _get_embed_model(self) -> Optional[str | HuggingFaceBgeEmbeddings]:
"""Get embed model according to configuration."""
if self._embed_model_path is not None:
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
logger.debug(
f"Loading embedding model info from path {self._embed_model_path}"
)
return HuggingFaceBgeEmbeddings(model_name=self._embed_model_path)
logger.warning("Embedding model path is not set.")
logger.warning("Embedding model is set to default")
return "local:BAAI/bge-base-en"
def _set_context(self) -> None:
"""Set storage/service context required for index load."""
logger.debug(f"Using {self._embed_model!s} as embedding model for index.")
logger.info("Setting up service context for index load...")
self._service_context = ServiceContext.from_defaults(
embed_model=self._embed_model, llm=None
)
logger.info("Setting up storage context for index load...")
self._storage_context = StorageContext.from_defaults(
persist_dir=self._index_path
)
def _load_index(self) -> None:
"""Load vector index."""
if self._index_path is None:
logger.warning("Index path is not set.")
else:
try:
self._set_context()
logger.info("Loading vector index...")
self._index = load_index_from_storage(
service_context=self._service_context,
storage_context=self._storage_context,
index_id=self._index_id,
)
logger.info("Vector index is loaded.")
except Exception as err:
logger.exception(f"Error loading vector index:\n{err}")
@property
def vector_index(self) -> Optional[BaseIndex]:
"""Get index."""
if self._index is None:
logger.warning(
"Proceeding without RAG content. "
"Either there is an error or required parameters are not set."
)
return self._index
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage"
] | [((661, 688), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (678, 688), False, 'import logging\n'), ((2376, 2445), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'self._embed_model', 'llm': 'None'}), '(embed_model=self._embed_model, llm=None)\n', (2404, 2445), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((2568, 2626), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'self._index_path'}), '(persist_dir=self._index_path)\n', (2596, 2626), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((1869, 1928), 'langchain_community.embeddings.HuggingFaceBgeEmbeddings', 'HuggingFaceBgeEmbeddings', ([], {'model_name': 'self._embed_model_path'}), '(model_name=self._embed_model_path)\n', (1893, 1928), False, 'from langchain_community.embeddings import HuggingFaceBgeEmbeddings\n'), ((2960, 3090), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'service_context': 'self._service_context', 'storage_context': 'self._storage_context', 'index_id': 'self._index_id'}), '(service_context=self._service_context,\n storage_context=self._storage_context, index_id=self._index_id)\n', (2983, 3090), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n')] |
from llama_index import PromptTemplate
instruction_str = """\
1. Convert the query to executable Python code using Pandas.
2. The final line of code should be a Python expression that can be called with the `eval()` function.
3. The code should represent a solution to the query.
4. PRINT ONLY THE EXPRESSION.
5. Do not quote the expression."""
new_prompt = PromptTemplate(
"""\
You are working with a pandas dataframe in Python.
The name of the dataframe is `df`.
This is the result of `print(df.head())`:
{df_str}
Follow these instructions:
{instruction_str}
Query: {query_str}
Expression: """
)
context = """Purpose: The primary role of this agent is to assist users by providing accurate
information about world population statistics and details about a country. """
| [
"llama_index.PromptTemplate"
] | [((381, 660), 'llama_index.PromptTemplate', 'PromptTemplate', (['""" You are working with a pandas dataframe in Python.\n The name of the dataframe is `df`.\n This is the result of `print(df.head())`:\n {df_str}\n\n Follow these instructions:\n {instruction_str}\n Query: {query_str}\n\n Expression: """'], {}), '(\n """ You are working with a pandas dataframe in Python.\n The name of the dataframe is `df`.\n This is the result of `print(df.head())`:\n {df_str}\n\n Follow these instructions:\n {instruction_str}\n Query: {query_str}\n\n Expression: """\n )\n', (395, 660), False, 'from llama_index import PromptTemplate\n')] |
import os, shutil, datetime, time, json
import gradio as gr
import sys
import os
from llama_index import GPTSimpleVectorIndex
bank_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../memory_bank')
sys.path.append(bank_path)
from build_memory_index import build_memory_index
memory_bank_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../memory_bank')
sys.path.append(memory_bank_path)
from summarize_memory import summarize_memory
def enter_name(name, memory,local_memory_qa,data_args,update_memory_index=True):
cur_date = datetime.date.today().strftime("%Y-%m-%d")
user_memory_index = None
if isinstance(data_args,gr.State):
data_args = data_args.value
if isinstance(memory,gr.State):
memory = memory.value
if isinstance(local_memory_qa,gr.State):
local_memory_qa=local_memory_qa.value
memory_dir = os.path.join(data_args.memory_basic_dir,data_args.memory_file)
if name in memory.keys():
user_memory = memory[name]
memory_index_path = os.path.join(data_args.memory_basic_dir,f'memory_index/{name}_index')
os.makedirs(os.path.dirname(memory_index_path), exist_ok=True)
if (not os.path.exists(memory_index_path)) or update_memory_index:
print(f'Initializing memory index {memory_index_path}...')
# filepath = input("Input your local knowledge file path 请输入本地知识文件路径:")
if os.path.exists(memory_index_path):
shutil.rmtree(memory_index_path)
memory_index_path, _ = local_memory_qa.init_memory_vector_store(filepath=memory_dir,vs_path=memory_index_path,user_name=name,cur_date=cur_date)
user_memory_index = local_memory_qa.load_memory_index(memory_index_path) if memory_index_path else None
msg = f"欢迎回来,{name}!" if data_args.language=='cn' else f"Wellcome Back, {name}!"
return msg,user_memory,memory, name,user_memory_index
else:
memory[name] = {}
memory[name].update({"name":name})
msg = f"欢迎新用户{name}!我会记住你的名字,下次见面就能叫你的名字啦!" if data_args.language == 'cn' else f'Welcome, new user {name}! I will remember your name, so next time we meet, I\'ll be able to call you by your name!'
return msg,memory[name],memory,name,user_memory_index
def enter_name_llamaindex(name, memory, data_args, update_memory_index=True):
user_memory_index = None
if name in memory.keys():
user_memory = memory[name]
memory_index_path = os.path.join(data_args.memory_basic_dir,f'memory_index/{name}_index.json')
if not os.path.exists(memory_index_path) or update_memory_index:
print(f'Initializing memory index {memory_index_path}...')
build_memory_index(memory,data_args,name=name)
if os.path.exists(memory_index_path):
user_memory_index = GPTSimpleVectorIndex.load_from_disk(memory_index_path)
print(f'Successfully load memory index for user {name}!')
return f"Wellcome Back, {name}!",user_memory,user_memory_index
else:
memory[name] = {}
memory[name].update({"name":name})
return f"Welcome new user{name}!I will remember your name and call you by your name in the next conversation",memory[name],user_memory_index
def summarize_memory_event_personality(data_args, memory, user_name):
if isinstance(data_args,gr.State):
data_args = data_args.value
if isinstance(memory,gr.State):
memory = memory.value
memory_dir = os.path.join(data_args.memory_basic_dir,data_args.memory_file)
memory = summarize_memory(memory_dir,user_name,language=data_args.language)
user_memory = memory[user_name] if user_name in memory.keys() else {}
return user_memory#, user_memory_index
def save_local_memory(memory,b,user_name,data_args):
if isinstance(data_args,gr.State):
data_args = data_args.value
if isinstance(memory,gr.State):
memory = memory.value
memory_dir = os.path.join(data_args.memory_basic_dir,data_args.memory_file)
date = time.strftime("%Y-%m-%d", time.localtime())
if memory[user_name].get("history") is None:
memory[user_name].update({"history":{}})
if memory[user_name]['history'].get(date) is None:
memory[user_name]['history'][date] = []
# date = len(memory[user_name]['history'])
memory[user_name]['history'][date].append({'query':b[-1][0],'response':b[-1][1]})
json.dump(memory,open(memory_dir,"w",encoding="utf-8"),ensure_ascii=False)
return memory | [
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((213, 239), 'sys.path.append', 'sys.path.append', (['bank_path'], {}), '(bank_path)\n', (228, 239), False, 'import sys\n'), ((384, 417), 'sys.path.append', 'sys.path.append', (['memory_bank_path'], {}), '(memory_bank_path)\n', (399, 417), False, 'import sys\n'), ((882, 945), 'os.path.join', 'os.path.join', (['data_args.memory_basic_dir', 'data_args.memory_file'], {}), '(data_args.memory_basic_dir, data_args.memory_file)\n', (894, 945), False, 'import os\n'), ((3526, 3589), 'os.path.join', 'os.path.join', (['data_args.memory_basic_dir', 'data_args.memory_file'], {}), '(data_args.memory_basic_dir, data_args.memory_file)\n', (3538, 3589), False, 'import os\n'), ((3602, 3670), 'summarize_memory.summarize_memory', 'summarize_memory', (['memory_dir', 'user_name'], {'language': 'data_args.language'}), '(memory_dir, user_name, language=data_args.language)\n', (3618, 3670), False, 'from summarize_memory import summarize_memory\n'), ((4000, 4063), 'os.path.join', 'os.path.join', (['data_args.memory_basic_dir', 'data_args.memory_file'], {}), '(data_args.memory_basic_dir, data_args.memory_file)\n', (4012, 4063), False, 'import os\n'), ((167, 192), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (182, 192), False, 'import os\n'), ((338, 363), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (353, 363), False, 'import os\n'), ((1038, 1108), 'os.path.join', 'os.path.join', (['data_args.memory_basic_dir', 'f"""memory_index/{name}_index"""'], {}), "(data_args.memory_basic_dir, f'memory_index/{name}_index')\n", (1050, 1108), False, 'import os\n'), ((2502, 2577), 'os.path.join', 'os.path.join', (['data_args.memory_basic_dir', 'f"""memory_index/{name}_index.json"""'], {}), "(data_args.memory_basic_dir, f'memory_index/{name}_index.json')\n", (2514, 2577), False, 'import os\n'), ((2791, 2824), 'os.path.exists', 'os.path.exists', (['memory_index_path'], {}), '(memory_index_path)\n', (2805, 2824), False, 'import os\n'), ((4100, 4116), 'time.localtime', 'time.localtime', ([], {}), '()\n', (4114, 4116), False, 'import os, shutil, datetime, time, json\n'), ((561, 582), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (580, 582), False, 'import os, shutil, datetime, time, json\n'), ((1128, 1162), 'os.path.dirname', 'os.path.dirname', (['memory_index_path'], {}), '(memory_index_path)\n', (1143, 1162), False, 'import os\n'), ((1420, 1453), 'os.path.exists', 'os.path.exists', (['memory_index_path'], {}), '(memory_index_path)\n', (1434, 1453), False, 'import os\n'), ((2733, 2781), 'build_memory_index.build_memory_index', 'build_memory_index', (['memory', 'data_args'], {'name': 'name'}), '(memory, data_args, name=name)\n', (2751, 2781), False, 'from build_memory_index import build_memory_index\n'), ((2858, 2912), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['memory_index_path'], {}), '(memory_index_path)\n', (2893, 2912), False, 'from llama_index import GPTSimpleVectorIndex\n'), ((1195, 1228), 'os.path.exists', 'os.path.exists', (['memory_index_path'], {}), '(memory_index_path)\n', (1209, 1228), False, 'import os\n'), ((1471, 1503), 'shutil.rmtree', 'shutil.rmtree', (['memory_index_path'], {}), '(memory_index_path)\n', (1484, 1503), False, 'import os, shutil, datetime, time, json\n'), ((2592, 2625), 'os.path.exists', 'os.path.exists', (['memory_index_path'], {}), '(memory_index_path)\n', (2606, 2625), False, 'import os\n')] |
from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage
from llama_index.storage.storage_context import StorageContext
from llama_index.indices.service_context import ServiceContext
from llama_index.llms import OpenAI
from llama_index.node_parser import SimpleNodeParser
from llama_index.node_parser.extractors import (
MetadataExtractor,
SummaryExtractor,
QuestionsAnsweredExtractor,
TitleExtractor,
KeywordExtractor,
)
from llama_index.text_splitter import TokenTextSplitter
from dotenv import load_dotenv
import openai
import gradio as gr
import sys, os
import logging
import json
#loads dotenv lib to retrieve API keys from .env file
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# enable INFO level logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
#define LLM service
llm = OpenAI(temperature=0.1, model_name="gpt-3.5-turbo", max_tokens=512)
service_context = ServiceContext.from_defaults(llm=llm)
#construct text splitter to split texts into chunks for processing
text_splitter = TokenTextSplitter(separator=" ", chunk_size=512, chunk_overlap=128)
#set the global service context object, avoiding passing service_context when building the index
from llama_index import set_global_service_context
set_global_service_context(service_context)
#create metadata extractor
metadata_extractor = MetadataExtractor(
extractors=[
TitleExtractor(nodes=1, llm=llm),
QuestionsAnsweredExtractor(questions=3, llm=llm),
SummaryExtractor(summaries=["prev", "self"], llm=llm),
KeywordExtractor(keywords=10, llm=llm)
],
)
#create node parser to parse nodes from document
node_parser = SimpleNodeParser(
text_splitter=text_splitter,
metadata_extractor=metadata_extractor,
)
#loading documents
documents_2022 = SimpleDirectoryReader(input_files=["data/executive-summary-2022.pdf"], filename_as_id=True).load_data()
print(f"loaded documents_2022 with {len(documents_2022)} pages")
documents_2021 = SimpleDirectoryReader(input_files=["data/executive-summary-2021.pdf"], filename_as_id=True).load_data()
print(f"loaded documents_2021 with {len(documents_2021)} pages")
def load_index():
try:
#load storage context
storage_context = StorageContext.from_defaults(persist_dir="./storage")
#try to load the index from storage
index = load_index_from_storage(storage_context)
logging.info("Index loaded from storage.")
except FileNotFoundError:
#if index not found, create a new one
logging.info("Index not found. Creating a new one...")
nodes_2022 = node_parser.get_nodes_from_documents(documents_2022)
nodes_2021 = node_parser.get_nodes_from_documents(documents_2021)
print(f"loaded nodes_2022 with {len(nodes_2022)} nodes")
print(f"loaded nodes_2021 with {len(nodes_2021)} nodes")
#print metadata in json format
for node in nodes_2022:
metadata_json = json.dumps(node.metadata, indent=4) # Convert metadata to formatted JSON
print(metadata_json)
for node in nodes_2021:
metadata_json = json.dumps(node.metadata, indent=4) # Convert metadata to formatted JSON
print(metadata_json)
#based on the nodes and service_context, create index
index = VectorStoreIndex(nodes=nodes_2022 + nodes_2021, service_context=service_context)
# Persist index to disk
index.storage_context.persist()
logging.info("New index created and persisted to storage.")
return index
def data_querying(input_text):
# Load index
index = load_index()
#queries the index with the input text
response = index.as_query_engine().query(input_text)
return response.response
iface = gr.Interface(fn=data_querying,
inputs=gr.components.Textbox(lines=3, label="Enter your question"),
outputs="text",
title="Analyzing the U.S. Government's Financial Reports for 2022")
iface.launch(share=False) | [
"llama_index.node_parser.extractors.KeywordExtractor",
"llama_index.text_splitter.TokenTextSplitter",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.SimpleDirectoryReader",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.node_parser.extractors.QuestionsAnsweredExtractor",
"llama_index.node_parser.extractors.SummaryExtractor",
"llama_index.llms.OpenAI",
"llama_index.VectorStoreIndex",
"llama_index.node_parser.extractors.TitleExtractor",
"llama_index.load_index_from_storage",
"llama_index.indices.service_context.ServiceContext.from_defaults",
"llama_index.set_global_service_context"
] | [((692, 705), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (703, 705), False, 'from dotenv import load_dotenv\n'), ((724, 751), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (733, 751), False, 'import sys, os\n'), ((781, 839), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (800, 839), False, 'import logging\n'), ((940, 1007), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.1)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': '(512)'}), "(temperature=0.1, model_name='gpt-3.5-turbo', max_tokens=512)\n", (946, 1007), False, 'from llama_index.llms import OpenAI\n'), ((1026, 1063), 'llama_index.indices.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (1054, 1063), False, 'from llama_index.indices.service_context import ServiceContext\n'), ((1148, 1215), 'llama_index.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'separator': '""" """', 'chunk_size': '(512)', 'chunk_overlap': '(128)'}), "(separator=' ', chunk_size=512, chunk_overlap=128)\n", (1165, 1215), False, 'from llama_index.text_splitter import TokenTextSplitter\n'), ((1366, 1409), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (1392, 1409), False, 'from llama_index import set_global_service_context\n'), ((1778, 1867), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {'text_splitter': 'text_splitter', 'metadata_extractor': 'metadata_extractor'}), '(text_splitter=text_splitter, metadata_extractor=\n metadata_extractor)\n', (1794, 1867), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((871, 911), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (892, 911), False, 'import logging\n'), ((840, 859), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (857, 859), False, 'import logging\n'), ((1911, 2006), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['data/executive-summary-2022.pdf']", 'filename_as_id': '(True)'}), "(input_files=['data/executive-summary-2022.pdf'],\n filename_as_id=True)\n", (1932, 2006), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((2097, 2192), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['data/executive-summary-2021.pdf']", 'filename_as_id': '(True)'}), "(input_files=['data/executive-summary-2021.pdf'],\n filename_as_id=True)\n", (2118, 2192), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((2356, 2409), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (2384, 2409), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((2470, 2510), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2493, 2510), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((2519, 2561), 'logging.info', 'logging.info', (['"""Index loaded from storage."""'], {}), "('Index loaded from storage.')\n", (2531, 2561), False, 'import logging\n'), ((3960, 4019), 'gradio.components.Textbox', 'gr.components.Textbox', ([], {'lines': '(3)', 'label': '"""Enter your question"""'}), "(lines=3, label='Enter your question')\n", (3981, 4019), True, 'import gradio as gr\n'), ((1503, 1535), 'llama_index.node_parser.extractors.TitleExtractor', 'TitleExtractor', ([], {'nodes': '(1)', 'llm': 'llm'}), '(nodes=1, llm=llm)\n', (1517, 1535), False, 'from llama_index.node_parser.extractors import MetadataExtractor, SummaryExtractor, QuestionsAnsweredExtractor, TitleExtractor, KeywordExtractor\n'), ((1545, 1593), 'llama_index.node_parser.extractors.QuestionsAnsweredExtractor', 'QuestionsAnsweredExtractor', ([], {'questions': '(3)', 'llm': 'llm'}), '(questions=3, llm=llm)\n', (1571, 1593), False, 'from llama_index.node_parser.extractors import MetadataExtractor, SummaryExtractor, QuestionsAnsweredExtractor, TitleExtractor, KeywordExtractor\n'), ((1603, 1656), 'llama_index.node_parser.extractors.SummaryExtractor', 'SummaryExtractor', ([], {'summaries': "['prev', 'self']", 'llm': 'llm'}), "(summaries=['prev', 'self'], llm=llm)\n", (1619, 1656), False, 'from llama_index.node_parser.extractors import MetadataExtractor, SummaryExtractor, QuestionsAnsweredExtractor, TitleExtractor, KeywordExtractor\n'), ((1666, 1704), 'llama_index.node_parser.extractors.KeywordExtractor', 'KeywordExtractor', ([], {'keywords': '(10)', 'llm': 'llm'}), '(keywords=10, llm=llm)\n', (1682, 1704), False, 'from llama_index.node_parser.extractors import MetadataExtractor, SummaryExtractor, QuestionsAnsweredExtractor, TitleExtractor, KeywordExtractor\n'), ((2655, 2709), 'logging.info', 'logging.info', (['"""Index not found. Creating a new one..."""'], {}), "('Index not found. Creating a new one...')\n", (2667, 2709), False, 'import logging\n'), ((3443, 3528), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {'nodes': '(nodes_2022 + nodes_2021)', 'service_context': 'service_context'}), '(nodes=nodes_2022 + nodes_2021, service_context=service_context\n )\n', (3459, 3528), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((3604, 3663), 'logging.info', 'logging.info', (['"""New index created and persisted to storage."""'], {}), "('New index created and persisted to storage.')\n", (3616, 3663), False, 'import logging\n'), ((3089, 3124), 'json.dumps', 'json.dumps', (['node.metadata'], {'indent': '(4)'}), '(node.metadata, indent=4)\n', (3099, 3124), False, 'import json\n'), ((3257, 3292), 'json.dumps', 'json.dumps', (['node.metadata'], {'indent': '(4)'}), '(node.metadata, indent=4)\n', (3267, 3292), False, 'import json\n')] |
import os
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.bridge.pydantic import Field, PrivateAttr
from llama_index.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.llms.base import llm_chat_callback, llm_completion_callback
from llama_index.llms.custom import CustomLLM
from llama_index.llms.generic_utils import (
completion_response_to_chat_response,
stream_completion_response_to_chat_response,
)
from llama_index.types import BaseOutputParser, PydanticProgramMode
from llama_index.utils import get_cache_dir
from byzerllm.utils.client import ByzerLLM
class ByzerAI(CustomLLM):
"""
ByzerAI is a custom LLM that uses the ByzerLLM API to generate text.
"""
verbose: bool = Field(
default=False,
description="Whether to print verbose output.",
)
_model: ByzerLLM = PrivateAttr()
def __init__(
self,
llm:ByzerLLM
) -> None:
self._model = llm
super().__init__()
@classmethod
def class_name(cls) -> str:
return "ByzerAI_llm"
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return LLMMetadata(
context_window=8024,
num_output=2048,
model_name=self._model.default_model_name,
)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
conversations = [{
"role":message.role,
"content":message.content
} for message in messages]
m = self._model.chat_oai(conversations=conversations)
completion_response = CompletionResponse(text=m[0].output, raw=None)
return completion_response_to_chat_response(completion_response)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
conversations = [{
"role":message.role,
"content":message.content
} for message in messages]
m = self._model.stream_chat_oai(conversations=conversations)
def gen():
v = ""
for response in m:
text:str = response[0]
metadata:Dict[str,Any] = response[1]
completion_response = CompletionResponse(text=text, delta=text[len(v):], raw=None)
v = text
yield completion_response
return stream_completion_response_to_chat_response(gen())
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
m = self._model.chat_oai(conversations=[{"role":"user","content":prompt}])
completion_response = CompletionResponse(text=m[0].output, raw=None)
return completion_response
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
conversations=[{"role":"user","content":prompt}]
m = self._model.stream_chat_oai(conversations=conversations)
def gen():
v = ""
for response in m:
text:str = response[0]
metadata:Dict[str,Any] = response[1]
completion_response = CompletionResponse(text=text, delta=text[len(v):], raw=None)
v = text
yield completion_response
return gen() | [
"llama_index.core.llms.types.CompletionResponse",
"llama_index.bridge.pydantic.Field",
"llama_index.llms.base.llm_completion_callback",
"llama_index.bridge.pydantic.PrivateAttr",
"llama_index.core.llms.types.LLMMetadata",
"llama_index.llms.base.llm_chat_callback",
"llama_index.llms.generic_utils.completion_response_to_chat_response"
] | [((858, 926), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether to print verbose output."""'}), "(default=False, description='Whether to print verbose output.')\n", (863, 926), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((974, 987), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (985, 987), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((1459, 1478), 'llama_index.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (1476, 1478), False, 'from llama_index.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1914, 1933), 'llama_index.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (1931, 1933), False, 'from llama_index.llms.base import llm_chat_callback, llm_completion_callback\n'), ((2667, 2692), 'llama_index.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (2690, 2692), False, 'from llama_index.llms.base import llm_chat_callback, llm_completion_callback\n'), ((3015, 3040), 'llama_index.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3038, 3040), False, 'from llama_index.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1310, 1407), 'llama_index.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': '(8024)', 'num_output': '(2048)', 'model_name': 'self._model.default_model_name'}), '(context_window=8024, num_output=2048, model_name=self._model.\n default_model_name)\n', (1321, 1407), False, 'from llama_index.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((1788, 1834), 'llama_index.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'm[0].output', 'raw': 'None'}), '(text=m[0].output, raw=None)\n', (1806, 1834), False, 'from llama_index.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((1850, 1907), 'llama_index.llms.generic_utils.completion_response_to_chat_response', 'completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (1886, 1907), False, 'from llama_index.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n'), ((2927, 2973), 'llama_index.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'm[0].output', 'raw': 'None'}), '(text=m[0].output, raw=None)\n', (2945, 2973), False, 'from llama_index.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n')] |
from typing import Any, List, Optional, Sequence
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.base import BaseGPTIndex
from llama_index.core.llms.llm import LLM
from llama_index.core.node_parser import SentenceSplitter, TextSplitter
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts import PromptTemplate
from llama_index.core.prompts.base import BasePromptTemplate
from llama_index.core.prompts.mixin import PromptMixinType
from llama_index.core.response_synthesizers import (
BaseSynthesizer,
ResponseMode,
get_response_synthesizer,
)
from llama_index.core.schema import (
MetadataMode,
NodeWithScore,
QueryBundle,
TextNode,
)
from llama_index.core.settings import (
Settings,
callback_manager_from_settings_or_context,
llm_from_settings_or_context,
)
CITATION_QA_TEMPLATE = PromptTemplate(
"Please provide an answer based solely on the provided sources. "
"When referencing information from a source, "
"cite the appropriate source(s) using their corresponding numbers. "
"Every answer should include at least one source citation. "
"Only cite a source when you are explicitly referencing it. "
"If none of the sources are helpful, you should indicate that. "
"For example:\n"
"Source 1:\n"
"The sky is red in the evening and blue in the morning.\n"
"Source 2:\n"
"Water is wet when the sky is red.\n"
"Query: When is water wet?\n"
"Answer: Water will be wet when the sky is red [2], "
"which occurs in the evening [1].\n"
"Now it's your turn. Below are several numbered sources of information:"
"\n------\n"
"{context_str}"
"\n------\n"
"Query: {query_str}\n"
"Answer: "
)
CITATION_REFINE_TEMPLATE = PromptTemplate(
"Please provide an answer based solely on the provided sources. "
"When referencing information from a source, "
"cite the appropriate source(s) using their corresponding numbers. "
"Every answer should include at least one source citation. "
"Only cite a source when you are explicitly referencing it. "
"If none of the sources are helpful, you should indicate that. "
"For example:\n"
"Source 1:\n"
"The sky is red in the evening and blue in the morning.\n"
"Source 2:\n"
"Water is wet when the sky is red.\n"
"Query: When is water wet?\n"
"Answer: Water will be wet when the sky is red [2], "
"which occurs in the evening [1].\n"
"Now it's your turn. "
"We have provided an existing answer: {existing_answer}"
"Below are several numbered sources of information. "
"Use them to refine the existing answer. "
"If the provided sources are not helpful, you will repeat the existing answer."
"\nBegin refining!"
"\n------\n"
"{context_msg}"
"\n------\n"
"Query: {query_str}\n"
"Answer: "
)
DEFAULT_CITATION_CHUNK_SIZE = 512
DEFAULT_CITATION_CHUNK_OVERLAP = 20
class CitationQueryEngine(BaseQueryEngine):
"""Citation query engine.
Args:
retriever (BaseRetriever): A retriever object.
response_synthesizer (Optional[BaseSynthesizer]):
A BaseSynthesizer object.
citation_chunk_size (int):
Size of citation chunks, default=512. Useful for controlling
granularity of sources.
citation_chunk_overlap (int): Overlap of citation nodes, default=20.
text_splitter (Optional[TextSplitter]):
A text splitter for creating citation source nodes. Default is
a SentenceSplitter.
callback_manager (Optional[CallbackManager]): A callback manager.
metadata_mode (MetadataMode): A MetadataMode object that controls how
metadata is included in the citation prompt.
"""
def __init__(
self,
retriever: BaseRetriever,
llm: Optional[LLM] = None,
response_synthesizer: Optional[BaseSynthesizer] = None,
citation_chunk_size: int = DEFAULT_CITATION_CHUNK_SIZE,
citation_chunk_overlap: int = DEFAULT_CITATION_CHUNK_OVERLAP,
text_splitter: Optional[TextSplitter] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
callback_manager: Optional[CallbackManager] = None,
metadata_mode: MetadataMode = MetadataMode.NONE,
) -> None:
self.text_splitter = text_splitter or SentenceSplitter(
chunk_size=citation_chunk_size, chunk_overlap=citation_chunk_overlap
)
self._retriever = retriever
service_context = retriever.get_service_context()
callback_manager = (
callback_manager
or callback_manager_from_settings_or_context(Settings, service_context)
)
llm = llm or llm_from_settings_or_context(Settings, service_context)
self._response_synthesizer = response_synthesizer or get_response_synthesizer(
llm=llm,
service_context=service_context,
callback_manager=callback_manager,
)
self._node_postprocessors = node_postprocessors or []
self._metadata_mode = metadata_mode
for node_postprocessor in self._node_postprocessors:
node_postprocessor.callback_manager = callback_manager
super().__init__(callback_manager=callback_manager)
@classmethod
def from_args(
cls,
index: BaseGPTIndex,
llm: Optional[LLM] = None,
response_synthesizer: Optional[BaseSynthesizer] = None,
citation_chunk_size: int = DEFAULT_CITATION_CHUNK_SIZE,
citation_chunk_overlap: int = DEFAULT_CITATION_CHUNK_OVERLAP,
text_splitter: Optional[TextSplitter] = None,
citation_qa_template: BasePromptTemplate = CITATION_QA_TEMPLATE,
citation_refine_template: BasePromptTemplate = CITATION_REFINE_TEMPLATE,
retriever: Optional[BaseRetriever] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
# response synthesizer args
response_mode: ResponseMode = ResponseMode.COMPACT,
use_async: bool = False,
streaming: bool = False,
# class-specific args
metadata_mode: MetadataMode = MetadataMode.NONE,
**kwargs: Any,
) -> "CitationQueryEngine":
"""Initialize a CitationQueryEngine object.".
Args:
index: (BastGPTIndex): index to use for querying
llm: (Optional[LLM]): LLM object to use for response generation.
citation_chunk_size (int):
Size of citation chunks, default=512. Useful for controlling
granularity of sources.
citation_chunk_overlap (int): Overlap of citation nodes, default=20.
text_splitter (Optional[TextSplitter]):
A text splitter for creating citation source nodes. Default is
a SentenceSplitter.
citation_qa_template (BasePromptTemplate): Template for initial citation QA
citation_refine_template (BasePromptTemplate):
Template for citation refinement.
retriever (BaseRetriever): A retriever object.
service_context (Optional[ServiceContext]): A ServiceContext object.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): A list of
node postprocessors.
verbose (bool): Whether to print out debug info.
response_mode (ResponseMode): A ResponseMode object.
use_async (bool): Whether to use async.
streaming (bool): Whether to use streaming.
optimizer (Optional[BaseTokenUsageOptimizer]): A BaseTokenUsageOptimizer
object.
"""
retriever = retriever or index.as_retriever(**kwargs)
response_synthesizer = response_synthesizer or get_response_synthesizer(
llm=llm,
service_context=index.service_context,
text_qa_template=citation_qa_template,
refine_template=citation_refine_template,
response_mode=response_mode,
use_async=use_async,
streaming=streaming,
)
return cls(
retriever=retriever,
response_synthesizer=response_synthesizer,
callback_manager=callback_manager_from_settings_or_context(
Settings, index.service_context
),
citation_chunk_size=citation_chunk_size,
citation_chunk_overlap=citation_chunk_overlap,
text_splitter=text_splitter,
node_postprocessors=node_postprocessors,
metadata_mode=metadata_mode,
)
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {"response_synthesizer": self._response_synthesizer}
def _create_citation_nodes(self, nodes: List[NodeWithScore]) -> List[NodeWithScore]:
"""Modify retrieved nodes to be granular sources."""
new_nodes: List[NodeWithScore] = []
for node in nodes:
text_chunks = self.text_splitter.split_text(
node.node.get_content(metadata_mode=self._metadata_mode)
)
for text_chunk in text_chunks:
text = f"Source {len(new_nodes)+1}:\n{text_chunk}\n"
new_node = NodeWithScore(
node=TextNode.parse_obj(node.node), score=node.score
)
new_node.node.text = text
new_nodes.append(new_node)
return new_nodes
def retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = self._retriever.retrieve(query_bundle)
for postprocessor in self._node_postprocessors:
nodes = postprocessor.postprocess_nodes(nodes, query_bundle=query_bundle)
return nodes
async def aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = await self._retriever.aretrieve(query_bundle)
for postprocessor in self._node_postprocessors:
nodes = postprocessor.postprocess_nodes(nodes, query_bundle=query_bundle)
return nodes
@property
def retriever(self) -> BaseRetriever:
"""Get the retriever object."""
return self._retriever
def synthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
nodes = self._create_citation_nodes(nodes)
return self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
additional_source_nodes=additional_source_nodes,
)
async def asynthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
nodes = self._create_citation_nodes(nodes)
return await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
additional_source_nodes=additional_source_nodes,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = self.retrieve(query_bundle)
nodes = self._create_citation_nodes(nodes)
retrieve_event.on_end(payload={EventPayload.NODES: nodes})
response = self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = await self.aretrieve(query_bundle)
nodes = self._create_citation_nodes(nodes)
retrieve_event.on_end(payload={EventPayload.NODES: nodes})
response = await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
| [
"llama_index.core.prompts.PromptTemplate",
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.response_synthesizers.get_response_synthesizer",
"llama_index.core.schema.TextNode.parse_obj"
] | [((1182, 1924), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Please provide an answer based solely on the provided sources. When referencing information from a source, cite the appropriate source(s) using their corresponding numbers. Every answer should include at least one source citation. Only cite a source when you are explicitly referencing it. If none of the sources are helpful, you should indicate that. For example:\nSource 1:\nThe sky is red in the evening and blue in the morning.\nSource 2:\nWater is wet when the sky is red.\nQuery: When is water wet?\nAnswer: Water will be wet when the sky is red [2], which occurs in the evening [1].\nNow it\'s your turn. Below are several numbered sources of information:\n------\n{context_str}\n------\nQuery: {query_str}\nAnswer: """'], {}), '(\n """Please provide an answer based solely on the provided sources. When referencing information from a source, cite the appropriate source(s) using their corresponding numbers. Every answer should include at least one source citation. Only cite a source when you are explicitly referencing it. If none of the sources are helpful, you should indicate that. For example:\nSource 1:\nThe sky is red in the evening and blue in the morning.\nSource 2:\nWater is wet when the sky is red.\nQuery: When is water wet?\nAnswer: Water will be wet when the sky is red [2], which occurs in the evening [1].\nNow it\'s your turn. Below are several numbered sources of information:\n------\n{context_str}\n------\nQuery: {query_str}\nAnswer: """\n )\n', (1196, 1924), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((2090, 3020), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Please provide an answer based solely on the provided sources. When referencing information from a source, cite the appropriate source(s) using their corresponding numbers. Every answer should include at least one source citation. Only cite a source when you are explicitly referencing it. If none of the sources are helpful, you should indicate that. For example:\nSource 1:\nThe sky is red in the evening and blue in the morning.\nSource 2:\nWater is wet when the sky is red.\nQuery: When is water wet?\nAnswer: Water will be wet when the sky is red [2], which occurs in the evening [1].\nNow it\'s your turn. We have provided an existing answer: {existing_answer}Below are several numbered sources of information. Use them to refine the existing answer. If the provided sources are not helpful, you will repeat the existing answer.\nBegin refining!\n------\n{context_msg}\n------\nQuery: {query_str}\nAnswer: """'], {}), '(\n """Please provide an answer based solely on the provided sources. When referencing information from a source, cite the appropriate source(s) using their corresponding numbers. Every answer should include at least one source citation. Only cite a source when you are explicitly referencing it. If none of the sources are helpful, you should indicate that. For example:\nSource 1:\nThe sky is red in the evening and blue in the morning.\nSource 2:\nWater is wet when the sky is red.\nQuery: When is water wet?\nAnswer: Water will be wet when the sky is red [2], which occurs in the evening [1].\nNow it\'s your turn. We have provided an existing answer: {existing_answer}Below are several numbered sources of information. Use them to refine the existing answer. If the provided sources are not helpful, you will repeat the existing answer.\nBegin refining!\n------\n{context_msg}\n------\nQuery: {query_str}\nAnswer: """\n )\n', (2104, 3020), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((4703, 4794), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'citation_chunk_size', 'chunk_overlap': 'citation_chunk_overlap'}), '(chunk_size=citation_chunk_size, chunk_overlap=\n citation_chunk_overlap)\n', (4719, 4794), False, 'from llama_index.core.node_parser import SentenceSplitter, TextSplitter\n'), ((4980, 5048), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (5021, 5048), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((5080, 5135), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (5108, 5135), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((5198, 5303), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {'llm': 'llm', 'service_context': 'service_context', 'callback_manager': 'callback_manager'}), '(llm=llm, service_context=service_context,\n callback_manager=callback_manager)\n', (5222, 5303), False, 'from llama_index.core.response_synthesizers import BaseSynthesizer, ResponseMode, get_response_synthesizer\n'), ((8135, 8373), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {'llm': 'llm', 'service_context': 'index.service_context', 'text_qa_template': 'citation_qa_template', 'refine_template': 'citation_refine_template', 'response_mode': 'response_mode', 'use_async': 'use_async', 'streaming': 'streaming'}), '(llm=llm, service_context=index.service_context,\n text_qa_template=citation_qa_template, refine_template=\n citation_refine_template, response_mode=response_mode, use_async=\n use_async, streaming=streaming)\n', (8159, 8373), False, 'from llama_index.core.response_synthesizers import BaseSynthesizer, ResponseMode, get_response_synthesizer\n'), ((8593, 8667), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'index.service_context'], {}), '(Settings, index.service_context)\n', (8634, 8667), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((9664, 9693), 'llama_index.core.schema.TextNode.parse_obj', 'TextNode.parse_obj', (['node.node'], {}), '(node.node)\n', (9682, 9693), False, 'from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle, TextNode\n')] |
"""
# My first app
Here's our first attempt at using data to create a table:
"""
import logging
import sys
import streamlit as st
from clickhouse_connect import common
from llama_index.core.settings import Settings
from llama_index.embeddings.fastembed import FastEmbedEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex, PromptTemplate
from llama_index.core.indices.struct_store import NLSQLTableQueryEngine
from llama_index.core.indices.vector_store import VectorIndexAutoRetriever
from llama_index.core.indices.vector_store.retrievers.auto_retriever.prompts import PREFIX, EXAMPLES
from llama_index.core.prompts import PromptType
from llama_index.core.query_engine import RetrieverQueryEngine, SQLAutoVectorQueryEngine
from llama_index.core.tools import QueryEngineTool
from llama_index.core.vector_stores.types import VectorStoreInfo, MetadataInfo
from llama_index.vector_stores.clickhouse import ClickHouseVectorStore
import clickhouse_connect
import openai
from sqlalchemy import (
create_engine,
)
from llama_index.core import SQLDatabase
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
host = st.secrets.clickhouse.host
password = st.secrets.clickhouse.password
username = st.secrets.clickhouse.username
secure = st.secrets.clickhouse.secure
http_port = st.secrets.clickhouse.http_port
native_port = st.secrets.clickhouse.native_port
open_ai_model = "gpt-4"
database = st.secrets.clickhouse.database
hackernews_table = st.secrets.clickhouse.hackernews_table
stackoverflow_table = st.secrets.clickhouse.stackoverflow_table
database = st.secrets.clickhouse.database
st.set_page_config(
page_title="Get summaries of Hacker News posts enriched with Stackoverflow survey results, powered by LlamaIndex and ClickHouse",
page_icon="🦙🚀", layout="centered", initial_sidebar_state="auto", menu_items=None)
st.title("💬HackBot powered by LlamaIndex 🦙 and ClickHouse 🚀")
st.info(
"Check out the full [blog post](https://clickhouse.com/blog/building-a-hackernews-chat-bot-with-llama-index-with-clickhouse/) for this app",
icon="📃")
st.caption("A Streamlit chatbot 💬 for Hacker News powered by LlamaIndex 🦙 and ClickHouse 🚀")
@st.cache_resource
def load_embedding():
return FastEmbedEmbedding(
model_name="sentence-transformers/all-MiniLM-L6-v2",
max_length=384,
cache_dir="./embeddings/"
)
Settings.embed_model = load_embedding()
CLICKHOUSE_TEXT_TO_SQL_TMPL = (
"Given an input question, first create a syntactically correct ClickHouse SQL "
"query to run, then look at the results of the query and return the answer. "
"You can order the results by a relevant column to return the most "
"interesting examples in the database.\n\n"
"Never query for all the columns from a specific table, only ask for a "
"few relevant columns given the question.\n\n"
"Pay attention to use only the column names that you can see in the schema "
"description. "
"Be careful to not query for columns that do not exist. "
"Pay attention to which column is in which table. "
"Also, qualify column names with the table name when needed. \n"
"If needing to group on Array Columns use the ClickHouse function arrayJoin e.g. arrayJoin(columnName) \n"
"For example, the following query identifies the most popular database:\n"
"SELECT d, count(*) AS count FROM so_surveys GROUP BY "
"arrayJoin(database_want_to_work_with) AS d ORDER BY count DESC LIMIT 1\n"
"You are required to use the following format, each taking one line:\n\n"
"Question: Question here\n"
"SQLQuery: SQL Query to run\n"
"SQLResult: Result of the SQLQuery\n"
"Answer: Final answer here\n\n"
"Only use tables listed below.\n"
"{schema}\n\n"
"Question: {query_str}\n"
"SQLQuery: "
)
CLICKHOUSE_TEXT_TO_SQL_PROMPT = PromptTemplate(
CLICKHOUSE_TEXT_TO_SQL_TMPL,
prompt_type=PromptType.TEXT_TO_SQL,
)
CLICKHOUSE_CUSTOM_SUFFIX = """
The following is the datasource schema to work with.
IMPORTANT: Make sure that filters are only used as needed and only suggest filters for fields in the data source.
Data Source:
```json
{info_str}
```
User Query:
{query_str}
Structured Request:
"""
CLICKHOUSE_VECTOR_STORE_QUERY_PROMPT_TMPL = PREFIX + EXAMPLES + CLICKHOUSE_CUSTOM_SUFFIX
@st.cache_resource
def clickhouse():
common.set_setting('autogenerate_session_id', False)
return clickhouse_connect.get_client(
host=host, port=http_port, username=username, password=password,
secure=secure, settings={"max_parallel_replicas": "3", "use_hedged_requests": "0",
"allow_experimental_parallel_reading_from_replicas": "1"}
)
def sql_auto_vector_query_engine():
with st.spinner(text="Preparing indexes. This should take a few seconds. No time to make 🫖"):
engine = create_engine(
f'clickhouse+native://{username}:{password}@{host}:' +
f'{native_port}/{database}?compression=lz4&secure={secure}'
)
sql_database = SQLDatabase(engine, include_tables=[stackoverflow_table], view_support=True)
vector_store = ClickHouseVectorStore(clickhouse_client=clickhouse(), table=hackernews_table)
vector_index = VectorStoreIndex.from_vector_store(vector_store)
return sql_database, vector_index
def get_engine(min_length, score, min_date):
sql_database, vector_index = sql_auto_vector_query_engine()
nl_sql_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=[stackoverflow_table],
text_to_sql_prompt=CLICKHOUSE_TEXT_TO_SQL_PROMPT,
llm=OpenAI(model=open_ai_model)
)
vector_store_info = VectorStoreInfo(
content_info="Social news posts and comments from users",
metadata_info=[
MetadataInfo(
name="post_score", type="int", description="Score of the comment or post",
),
MetadataInfo(
name="by", type="str", description="the author or person who posted the comment",
),
MetadataInfo(
name="time", type="date", description="the time at which the post or comment was made",
),
]
)
vector_auto_retriever = VectorIndexAutoRetriever(
vector_index, vector_store_info=vector_store_info, similarity_top_k=10,
prompt_template_str=CLICKHOUSE_VECTOR_STORE_QUERY_PROMPT_TMPL, llm=OpenAI(model=open_ai_model),
vector_store_kwargs={"where": f"length >= {min_length} AND post_score >= {score} AND time >= '{min_date}'"}
)
retriever_query_engine = RetrieverQueryEngine.from_args(vector_auto_retriever, llm=OpenAI(model=open_ai_model))
sql_tool = QueryEngineTool.from_defaults(
query_engine=nl_sql_engine,
description=(
"Useful for translating a natural language query into a SQL query over"
f" a table: {stackoverflow_table}, containing the survey responses on"
f" different types of technology users currently use and want to use"
),
)
vector_tool = QueryEngineTool.from_defaults(
query_engine=retriever_query_engine,
description=(
f"Useful for answering semantic questions abouts users comments and posts"
),
)
return SQLAutoVectorQueryEngine(
sql_tool, vector_tool, llm=OpenAI(model=open_ai_model)
)
# identify the value ranges for our score, length and date widgets
if "max_score" not in st.session_state.keys():
client = clickhouse()
st.session_state.max_score = int(
client.query("SELECT max(post_score) FROM default.hackernews_llama").first_row[0])
st.session_state.max_length = int(
client.query("SELECT max(length) FROM default.hackernews_llama").first_row[0])
st.session_state.min_date, st.session_state.max_date = client.query(
"SELECT min(toDate(time)), max(toDate(time)) FROM default.hackernews_llama WHERE time != '1970-01-01 00:00:00'").first_row
# set the initial message on load. Store in the session.
if "messages" not in st.session_state:
st.session_state.messages = [
{"role": "assistant", "content": "Ask me a question about opinions on Hacker News and Stackoverflow!"}]
# build the sidebar with our filters
with st.sidebar:
score = st.slider('Min Score', 0, st.session_state.max_score, value=0)
min_length = st.slider('Min comment Length (tokens)', 0, st.session_state.max_length, value=20)
min_date = st.date_input('Min comment date', value=st.session_state.min_date, min_value=st.session_state.min_date,
max_value=st.session_state.max_date)
openai_api_key = st.text_input("Open API Key", key="chatbot_api_key", type="password")
openai.api_key = openai_api_key
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
"[View the source code](https://github.com/ClickHouse/examples/blob/main/blog-examples/llama-index/hacknernews_app/hacker_insights.py)"
# grab the users OPENAI api key. Don’t allow questions if not entered.
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
if prompt := st.chat_input(placeholder="Your question about Hacker News"):
st.session_state.messages.append({"role": "user", "content": prompt})
# Display the prior chat messages
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
# Query our engine for the answer and write to the page
response = str(get_engine(min_length, score, min_date).query(prompt))
st.write(response)
st.session_state.messages.append({"role": "assistant", "content": response})
| [
"llama_index.core.PromptTemplate",
"llama_index.core.vector_stores.types.MetadataInfo",
"llama_index.core.VectorStoreIndex.from_vector_store",
"llama_index.llms.openai.OpenAI",
"llama_index.core.tools.QueryEngineTool.from_defaults",
"llama_index.core.SQLDatabase",
"llama_index.embeddings.fastembed.FastEmbedEmbedding"
] | [((1100, 1158), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (1119, 1158), False, 'import logging\n'), ((1713, 1957), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Get summaries of Hacker News posts enriched with Stackoverflow survey results, powered by LlamaIndex and ClickHouse"""', 'page_icon': '"""🦙🚀"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title=\n 'Get summaries of Hacker News posts enriched with Stackoverflow survey results, powered by LlamaIndex and ClickHouse'\n , page_icon='🦙🚀', layout='centered', initial_sidebar_state='auto',\n menu_items=None)\n", (1731, 1957), True, 'import streamlit as st\n'), ((1953, 2014), 'streamlit.title', 'st.title', (['"""💬HackBot powered by LlamaIndex 🦙 and ClickHouse 🚀"""'], {}), "('💬HackBot powered by LlamaIndex 🦙 and ClickHouse 🚀')\n", (1961, 2014), True, 'import streamlit as st\n'), ((2015, 2183), 'streamlit.info', 'st.info', (['"""Check out the full [blog post](https://clickhouse.com/blog/building-a-hackernews-chat-bot-with-llama-index-with-clickhouse/) for this app"""'], {'icon': '"""📃"""'}), "(\n 'Check out the full [blog post](https://clickhouse.com/blog/building-a-hackernews-chat-bot-with-llama-index-with-clickhouse/) for this app'\n , icon='📃')\n", (2022, 2183), True, 'import streamlit as st\n'), ((2183, 2285), 'streamlit.caption', 'st.caption', (['"""A Streamlit chatbot 💬 for Hacker News powered by LlamaIndex 🦙 and ClickHouse 🚀"""'], {}), "(\n 'A Streamlit chatbot 💬 for Hacker News powered by LlamaIndex 🦙 and ClickHouse 🚀'\n )\n", (2193, 2285), True, 'import streamlit as st\n'), ((3944, 4023), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['CLICKHOUSE_TEXT_TO_SQL_TMPL'], {'prompt_type': 'PromptType.TEXT_TO_SQL'}), '(CLICKHOUSE_TEXT_TO_SQL_TMPL, prompt_type=PromptType.TEXT_TO_SQL)\n', (3958, 4023), False, 'from llama_index.core import VectorStoreIndex, PromptTemplate\n'), ((1190, 1230), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (1211, 1230), False, 'import logging\n'), ((2330, 2448), 'llama_index.embeddings.fastembed.FastEmbedEmbedding', 'FastEmbedEmbedding', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L6-v2"""', 'max_length': '(384)', 'cache_dir': '"""./embeddings/"""'}), "(model_name='sentence-transformers/all-MiniLM-L6-v2',\n max_length=384, cache_dir='./embeddings/')\n", (2348, 2448), False, 'from llama_index.embeddings.fastembed import FastEmbedEmbedding\n'), ((4455, 4507), 'clickhouse_connect.common.set_setting', 'common.set_setting', (['"""autogenerate_session_id"""', '(False)'], {}), "('autogenerate_session_id', False)\n", (4473, 4507), False, 'from clickhouse_connect import common\n'), ((4519, 4767), 'clickhouse_connect.get_client', 'clickhouse_connect.get_client', ([], {'host': 'host', 'port': 'http_port', 'username': 'username', 'password': 'password', 'secure': 'secure', 'settings': "{'max_parallel_replicas': '3', 'use_hedged_requests': '0',\n 'allow_experimental_parallel_reading_from_replicas': '1'}"}), "(host=host, port=http_port, username=username,\n password=password, secure=secure, settings={'max_parallel_replicas':\n '3', 'use_hedged_requests': '0',\n 'allow_experimental_parallel_reading_from_replicas': '1'})\n", (4548, 4767), False, 'import clickhouse_connect\n'), ((6832, 7118), 'llama_index.core.tools.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'nl_sql_engine', 'description': 'f"""Useful for translating a natural language query into a SQL query over a table: {stackoverflow_table}, containing the survey responses on different types of technology users currently use and want to use"""'}), "(query_engine=nl_sql_engine, description=\n f'Useful for translating a natural language query into a SQL query over a table: {stackoverflow_table}, containing the survey responses on different types of technology users currently use and want to use'\n )\n", (6861, 7118), False, 'from llama_index.core.tools import QueryEngineTool\n'), ((7205, 7368), 'llama_index.core.tools.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'retriever_query_engine', 'description': 'f"""Useful for answering semantic questions abouts users comments and posts"""'}), "(query_engine=retriever_query_engine,\n description=\n f'Useful for answering semantic questions abouts users comments and posts')\n", (7234, 7368), False, 'from llama_index.core.tools import QueryEngineTool\n'), ((7605, 7628), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (7626, 7628), True, 'import streamlit as st\n'), ((8425, 8487), 'streamlit.slider', 'st.slider', (['"""Min Score"""', '(0)', 'st.session_state.max_score'], {'value': '(0)'}), "('Min Score', 0, st.session_state.max_score, value=0)\n", (8434, 8487), True, 'import streamlit as st\n'), ((8505, 8591), 'streamlit.slider', 'st.slider', (['"""Min comment Length (tokens)"""', '(0)', 'st.session_state.max_length'], {'value': '(20)'}), "('Min comment Length (tokens)', 0, st.session_state.max_length,\n value=20)\n", (8514, 8591), True, 'import streamlit as st\n'), ((8603, 8747), 'streamlit.date_input', 'st.date_input', (['"""Min comment date"""'], {'value': 'st.session_state.min_date', 'min_value': 'st.session_state.min_date', 'max_value': 'st.session_state.max_date'}), "('Min comment date', value=st.session_state.min_date,\n min_value=st.session_state.min_date, max_value=st.session_state.max_date)\n", (8616, 8747), True, 'import streamlit as st\n'), ((8794, 8863), 'streamlit.text_input', 'st.text_input', (['"""Open API Key"""'], {'key': '"""chatbot_api_key"""', 'type': '"""password"""'}), "('Open API Key', key='chatbot_api_key', type='password')\n", (8807, 8863), True, 'import streamlit as st\n'), ((9214, 9268), 'streamlit.info', 'st.info', (['"""Please add your OpenAI API key to continue."""'], {}), "('Please add your OpenAI API key to continue.')\n", (9221, 9268), True, 'import streamlit as st\n'), ((9273, 9282), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (9280, 9282), True, 'import streamlit as st\n'), ((9297, 9357), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Your question about Hacker News"""'}), "(placeholder='Your question about Hacker News')\n", (9310, 9357), True, 'import streamlit as st\n'), ((9363, 9432), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (9395, 9432), True, 'import streamlit as st\n'), ((1159, 1178), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1176, 1178), False, 'import logging\n'), ((4858, 4950), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Preparing indexes. This should take a few seconds. No time to make 🫖"""'}), "(text=\n 'Preparing indexes. This should take a few seconds. No time to make 🫖')\n", (4868, 4950), True, 'import streamlit as st\n'), ((4964, 5097), 'sqlalchemy.create_engine', 'create_engine', (["(f'clickhouse+native://{username}:{password}@{host}:' +\n f'{native_port}/{database}?compression=lz4&secure={secure}')"], {}), "(f'clickhouse+native://{username}:{password}@{host}:' +\n f'{native_port}/{database}?compression=lz4&secure={secure}')\n", (4977, 5097), False, 'from sqlalchemy import create_engine\n'), ((5151, 5227), 'llama_index.core.SQLDatabase', 'SQLDatabase', (['engine'], {'include_tables': '[stackoverflow_table]', 'view_support': '(True)'}), '(engine, include_tables=[stackoverflow_table], view_support=True)\n', (5162, 5227), False, 'from llama_index.core import SQLDatabase\n'), ((5352, 5400), 'llama_index.core.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (5386, 5400), False, 'from llama_index.core import VectorStoreIndex, PromptTemplate\n'), ((9553, 9585), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (9568, 9585), True, 'import streamlit as st\n'), ((9595, 9623), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (9603, 9623), True, 'import streamlit as st\n'), ((9756, 9784), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (9771, 9784), True, 'import streamlit as st\n'), ((5741, 5768), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'open_ai_model'}), '(model=open_ai_model)\n', (5747, 5768), False, 'from llama_index.llms.openai import OpenAI\n'), ((6548, 6575), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'open_ai_model'}), '(model=open_ai_model)\n', (6554, 6575), False, 'from llama_index.llms.openai import OpenAI\n'), ((6787, 6814), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'open_ai_model'}), '(model=open_ai_model)\n', (6793, 6814), False, 'from llama_index.llms.openai import OpenAI\n'), ((7480, 7507), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'open_ai_model'}), '(model=open_ai_model)\n', (7486, 7507), False, 'from llama_index.llms.openai import OpenAI\n'), ((9799, 9824), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (9809, 9824), True, 'import streamlit as st\n'), ((9988, 10006), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (9996, 10006), True, 'import streamlit as st\n'), ((10019, 10095), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': response}"], {}), "({'role': 'assistant', 'content': response})\n", (10051, 10095), True, 'import streamlit as st\n'), ((5918, 6010), 'llama_index.core.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""post_score"""', 'type': '"""int"""', 'description': '"""Score of the comment or post"""'}), "(name='post_score', type='int', description=\n 'Score of the comment or post')\n", (5930, 6010), False, 'from llama_index.core.vector_stores.types import VectorStoreInfo, MetadataInfo\n'), ((6050, 6149), 'llama_index.core.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""by"""', 'type': '"""str"""', 'description': '"""the author or person who posted the comment"""'}), "(name='by', type='str', description=\n 'the author or person who posted the comment')\n", (6062, 6149), False, 'from llama_index.core.vector_stores.types import VectorStoreInfo, MetadataInfo\n'), ((6189, 6294), 'llama_index.core.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""time"""', 'type': '"""date"""', 'description': '"""the time at which the post or comment was made"""'}), "(name='time', type='date', description=\n 'the time at which the post or comment was made')\n", (6201, 6294), False, 'from llama_index.core.vector_stores.types import VectorStoreInfo, MetadataInfo\n')] |
import chromadb
import openai
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
load_dotenv()
from llama_index.llms import OpenAI
from llama_index import VectorStoreIndex, ServiceContext
from llama_index.vector_stores import ChromaVectorStore
import os
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
print(OPENAI_API_KEY)
client = chromadb.PersistentClient(path=".chromadb/")
print(client.list_collections())
# get a collection
collection_name = input("请输入要获取的collection name:")
chroma_collection = client.get_collection(collection_name)
print(chroma_collection.count())
# 创建 ChatOpenAI 实例作为底层语言模型
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-16k-0613")
service_context = ServiceContext.from_defaults(llm=llm)
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
index = VectorStoreIndex.from_vector_store(vector_store, service_context=service_context)
query_engine = index.as_query_engine(service_context=service_context, verbose=True, streaming=True)
while True:
user_input = []
print("请输入您的问题(纯文本格式),换行输入 n 以结束:")
while True:
line = input()
if line != "n":
user_input.append(line)
else:
break
user_input_text = "\n".join(user_input)
# print(user_input_text)
# print(user_input_text)
print("****Thingking******")
try:
r = query_engine.query(user_input_text)
print(r)
except Exception as e:
print("出现异常:", str(e))
| [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.ChromaVectorStore"
] | [((107, 120), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (118, 120), False, 'from dotenv import load_dotenv\n'), ((298, 325), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (307, 325), False, 'import os\n'), ((390, 434), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '""".chromadb/"""'}), "(path='.chromadb/')\n", (415, 434), False, 'import chromadb\n'), ((665, 722), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-3.5-turbo-16k-0613"""'}), "(temperature=0, model='gpt-3.5-turbo-16k-0613')\n", (675, 722), False, 'from langchain.chat_models import ChatOpenAI\n'), ((741, 778), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (769, 778), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((795, 849), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (812, 849), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((858, 944), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'service_context': 'service_context'}), '(vector_store, service_context=\n service_context)\n', (892, 944), False, 'from llama_index import VectorStoreIndex, ServiceContext\n')] |
import os
from dotenv import load_dotenv
from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor
from langchain.chat_models import ChatOpenAI
load_dotenv()
os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_KEY')
def tune_llm(input_directory="sourcedata", output_file="indexdata/index.json"):
loaded_content = SimpleDirectoryReader(input_directory).load_data()
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7, model_name='gpt-3.5-turbo'))
output_index = GPTSimpleVectorIndex(loaded_content, llm_predictor=llm_predictor)
# Create the output directory if it doesn't exist
os.makedirs(os.path.dirname(output_file), exist_ok=True)
output_index.save_to_disk(output_file) | [
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader"
] | [((169, 182), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (180, 182), False, 'from dotenv import load_dotenv\n'), ((215, 238), 'os.getenv', 'os.getenv', (['"""OPENAI_KEY"""'], {}), "('OPENAI_KEY')\n", (224, 238), False, 'import os\n'), ((506, 571), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['loaded_content'], {'llm_predictor': 'llm_predictor'}), '(loaded_content, llm_predictor=llm_predictor)\n', (526, 571), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor\n'), ((643, 671), 'os.path.dirname', 'os.path.dirname', (['output_file'], {}), '(output_file)\n', (658, 671), False, 'import os\n'), ((341, 379), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['input_directory'], {}), '(input_directory)\n', (362, 379), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor\n'), ((430, 485), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.7)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0.7, model_name='gpt-3.5-turbo')\n", (440, 485), False, 'from langchain.chat_models import ChatOpenAI\n')] |
from ..conversable_agent import ConversableAgent
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from ....utils.client import ByzerLLM
from byzerllm.utils.retrieval import ByzerRetrieval
from ..agent import Agent
import ray
from ray.util.client.common import ClientActorHandle, ClientObjectRef
from .. import get_agent_name,run_agent_func,ChatResponse
from byzerllm.apps.agent.extensions.simple_retrieval_client import SimpleRetrievalClient
import uuid
import json
from byzerllm.apps.llama_index import get_service_context,get_storage_context
from llama_index import VectorStoreIndex
from llama_index.query_engine import SubQuestionQueryEngine
try:
from termcolor import colored
except ImportError:
def colored(x, *args, **kwargs):
return x
from llama_index.tools import QueryEngineTool, ToolMetadata
class LlamaIndexSubQuestionAgent(ConversableAgent):
PROMPT_DEFAULT = """You're a retrieve augmented chatbot. """
DEFAULT_SYSTEM_MESSAGE = PROMPT_DEFAULT
def __init__(
self,
name: str,
llm: ByzerLLM,
retrieval: ByzerRetrieval,
chat_name:str,
owner:str,
update_context_retry: int = 3,
system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
max_consecutive_auto_reply: Optional[int] = None,
human_input_mode: Optional[str] = "NEVER",
code_execution_config: Optional[Union[Dict, bool]] = False,
**kwargs,
):
super().__init__(
name,
llm,retrieval,
system_message,
is_termination_msg,
max_consecutive_auto_reply,
human_input_mode,
code_execution_config=code_execution_config,
**kwargs,
)
self.chat_name = chat_name
self.owner = owner
self.update_context_retry = update_context_retry
self._reply_func_list = []
# self.register_reply([Agent, ClientActorHandle,str], ConversableAgent.generate_llm_reply)
self.register_reply([Agent, ClientActorHandle,str], LlamaIndexSubQuestionAgent.generate_retrieval_based_reply)
self.register_reply([Agent, ClientActorHandle,str], ConversableAgent.check_termination_and_human_reply)
self.service_context = get_service_context(llm)
self.storage_context = get_storage_context(llm,retrieval)
def generate_retrieval_based_reply(
self,
raw_message: Optional[Union[Dict,str,ChatResponse]] = None,
messages: Optional[List[Dict]] = None,
sender: Optional[Union[ClientActorHandle,Agent,str]] = None,
config: Optional[Any] = None,
) -> Tuple[bool, Union[str, Dict, None,ChatResponse]]:
if messages is None:
messages = self._messages[get_agent_name(sender)]
new_message = messages[-1]
index = VectorStoreIndex.from_vector_store(vector_store = self.storage_context.vector_store,service_context=self.service_context)
vector_query_engine = index.as_query_engine()
query_engine_tools = [
QueryEngineTool(
query_engine=vector_query_engine,
metadata=ToolMetadata(
name="common",
description="common",
),
),
]
query_engine = SubQuestionQueryEngine.from_defaults(
query_engine_tools=query_engine_tools,
service_context=self.service_context,
use_async=True,
)
response = query_engine.query(new_message["content"])
return True, {
"content":response.response,
"metadata":{"agent":self.name,"TERMINATE":True}
}
| [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.tools.ToolMetadata",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults"
] | [((2438, 2462), 'byzerllm.apps.llama_index.get_service_context', 'get_service_context', (['llm'], {}), '(llm)\n', (2457, 2462), False, 'from byzerllm.apps.llama_index import get_service_context, get_storage_context\n'), ((2494, 2529), 'byzerllm.apps.llama_index.get_storage_context', 'get_storage_context', (['llm', 'retrieval'], {}), '(llm, retrieval)\n', (2513, 2529), False, 'from byzerllm.apps.llama_index import get_service_context, get_storage_context\n'), ((3092, 3217), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'self.storage_context.vector_store', 'service_context': 'self.service_context'}), '(vector_store=self.storage_context.\n vector_store, service_context=self.service_context)\n', (3126, 3217), False, 'from llama_index import VectorStoreIndex\n'), ((3754, 3887), 'llama_index.query_engine.SubQuestionQueryEngine.from_defaults', 'SubQuestionQueryEngine.from_defaults', ([], {'query_engine_tools': 'query_engine_tools', 'service_context': 'self.service_context', 'use_async': '(True)'}), '(query_engine_tools=query_engine_tools,\n service_context=self.service_context, use_async=True)\n', (3790, 3887), False, 'from llama_index.query_engine import SubQuestionQueryEngine\n'), ((3463, 3512), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""common"""', 'description': '"""common"""'}), "(name='common', description='common')\n", (3475, 3512), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n')] |
from typing import Union, Optional, List
from llama_index.chat_engine.types import BaseChatEngine, ChatMode
from llama_index.embeddings.utils import EmbedType
from llama_index.chat_engine import ContextChatEngine
from llama_index.memory import ChatMemoryBuffer
from lyzr.base.llm import LyzrLLMFactory
from lyzr.base.service import LyzrService
from lyzr.base.vector_store import LyzrVectorStoreIndex
from lyzr.base.retrievers import LyzrRetriever
from lyzr.utils.document_reading import (
read_pdf_as_documents,
read_docx_as_documents,
read_txt_as_documents,
read_website_as_documents,
read_webpage_as_documents,
read_youtube_as_documents,
)
def pdf_chat_(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
retriever_params: dict = None,
) -> BaseChatEngine:
documents = read_pdf_as_documents(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
)
llm_params = (
{
"model": "gpt-4-0125-preview",
"temperature": 0,
}
if llm_params is None
else llm_params
)
vector_store_params = (
{"vector_store_type": "WeaviateVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
retriever_params = (
{"retriever_type": "QueryFusionRetriever"}
if retriever_params is None
else retriever_params
)
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
retriever = LyzrRetriever.from_defaults(
**retriever_params, base_index=vector_store_index
)
memory = ChatMemoryBuffer.from_defaults(token_limit=4000)
chat_engine = ContextChatEngine(
llm=llm,
memory=memory,
retriever=retriever,
prefix_messages=list(),
**chat_engine_params,
)
return chat_engine
def txt_chat_(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
retriever_params: dict = None,
) -> BaseChatEngine:
documents = read_txt_as_documents(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
)
llm_params = (
{
"model": "gpt-4-0125-preview",
"temperature": 0,
}
if llm_params is None
else llm_params
)
vector_store_params = (
{"vector_store_type": "WeaviateVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
retriever_params = (
{"retriever_type": "QueryFusionRetriever"}
if retriever_params is None
else retriever_params
)
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
retriever = LyzrRetriever.from_defaults(
**retriever_params, base_index=vector_store_index
)
memory = ChatMemoryBuffer.from_defaults(token_limit=4000)
chat_engine = ContextChatEngine(
llm=llm,
memory=memory,
retriever=retriever,
prefix_messages=list(),
**chat_engine_params,
)
return chat_engine
def docx_chat_(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
retriever_params: dict = None,
) -> BaseChatEngine:
documents = read_docx_as_documents(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
)
llm_params = (
{
"model": "gpt-4-0125-preview",
"temperature": 0,
}
if llm_params is None
else llm_params
)
vector_store_params = (
{"vector_store_type": "WeaviateVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
retriever_params = (
{"retriever_type": "QueryFusionRetriever"}
if retriever_params is None
else retriever_params
)
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
retriever = LyzrRetriever.from_defaults(
**retriever_params, base_index=vector_store_index
)
memory = ChatMemoryBuffer.from_defaults(token_limit=4000)
chat_engine = ContextChatEngine(
llm=llm,
memory=memory,
retriever=retriever,
prefix_messages=list(),
**chat_engine_params,
)
return chat_engine
def webpage_chat_(
url: str = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
retriever_params: dict = None,
) -> BaseChatEngine:
documents = read_webpage_as_documents(
url=url,
)
llm_params = (
{
"model": "gpt-4-0125-preview",
"temperature": 0,
}
if llm_params is None
else llm_params
)
vector_store_params = (
{"vector_store_type": "WeaviateVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
retriever_params = (
{"retriever_type": "QueryFusionRetriever"}
if retriever_params is None
else retriever_params
)
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
retriever = LyzrRetriever.from_defaults(
**retriever_params, base_index=vector_store_index
)
memory = ChatMemoryBuffer.from_defaults(token_limit=4000)
chat_engine = ContextChatEngine(
llm=llm,
memory=memory,
retriever=retriever,
prefix_messages=list(),
**chat_engine_params,
)
return chat_engine
def website_chat_(
url: str = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
retriever_params: dict = None,
) -> BaseChatEngine:
documents = read_website_as_documents(
url=url,
)
llm_params = (
{
"model": "gpt-4-0125-preview",
"temperature": 0,
}
if llm_params is None
else llm_params
)
vector_store_params = (
{"vector_store_type": "WeaviateVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
retriever_params = (
{"retriever_type": "QueryFusionRetriever"}
if retriever_params is None
else retriever_params
)
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
retriever = LyzrRetriever.from_defaults(
**retriever_params, base_index=vector_store_index
)
memory = ChatMemoryBuffer.from_defaults(token_limit=4000)
chat_engine = ContextChatEngine(
llm=llm,
memory=memory,
retriever=retriever,
prefix_messages=list(),
**chat_engine_params,
)
return chat_engine
def youtube_chat_(
urls: List[str] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
retriever_params: dict = None,
) -> BaseChatEngine:
documents = read_youtube_as_documents(
urls=urls,
)
llm_params = (
{
"model": "gpt-4-0125-preview",
"temperature": 0,
}
if llm_params is None
else llm_params
)
vector_store_params = (
{"vector_store_type": "WeaviateVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
retriever_params = (
{"retriever_type": "QueryFusionRetriever"}
if retriever_params is None
else retriever_params
)
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
retriever = LyzrRetriever.from_defaults(
**retriever_params, base_index=vector_store_index
)
memory = ChatMemoryBuffer.from_defaults(token_limit=4000)
chat_engine = ContextChatEngine(
llm=llm,
memory=memory,
retriever=retriever,
prefix_messages=list(),
**chat_engine_params,
)
return chat_engine
| [
"llama_index.memory.ChatMemoryBuffer.from_defaults"
] | [((1242, 1430), 'lyzr.utils.document_reading.read_pdf_as_documents', 'read_pdf_as_documents', ([], {'input_dir': 'input_dir', 'input_files': 'input_files', 'exclude_hidden': 'exclude_hidden', 'filename_as_id': 'filename_as_id', 'recursive': 'recursive', 'required_exts': 'required_exts'}), '(input_dir=input_dir, input_files=input_files,\n exclude_hidden=exclude_hidden, filename_as_id=filename_as_id, recursive\n =recursive, required_exts=required_exts)\n', (1263, 1430), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((2161, 2203), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (2189, 2203), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((2226, 2393), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (2251, 2393), False, 'from lyzr.base.service import LyzrService\n'), ((2457, 2573), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (2491, 2573), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((2600, 2678), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (2627, 2678), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((2707, 2755), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (2737, 2755), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((3528, 3716), 'lyzr.utils.document_reading.read_txt_as_documents', 'read_txt_as_documents', ([], {'input_dir': 'input_dir', 'input_files': 'input_files', 'exclude_hidden': 'exclude_hidden', 'filename_as_id': 'filename_as_id', 'recursive': 'recursive', 'required_exts': 'required_exts'}), '(input_dir=input_dir, input_files=input_files,\n exclude_hidden=exclude_hidden, filename_as_id=filename_as_id, recursive\n =recursive, required_exts=required_exts)\n', (3549, 3716), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((4447, 4489), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (4475, 4489), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((4512, 4679), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (4537, 4679), False, 'from lyzr.base.service import LyzrService\n'), ((4743, 4859), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (4777, 4859), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((4886, 4964), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (4913, 4964), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((4993, 5041), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (5023, 5041), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((5815, 6004), 'lyzr.utils.document_reading.read_docx_as_documents', 'read_docx_as_documents', ([], {'input_dir': 'input_dir', 'input_files': 'input_files', 'exclude_hidden': 'exclude_hidden', 'filename_as_id': 'filename_as_id', 'recursive': 'recursive', 'required_exts': 'required_exts'}), '(input_dir=input_dir, input_files=input_files,\n exclude_hidden=exclude_hidden, filename_as_id=filename_as_id, recursive\n =recursive, required_exts=required_exts)\n', (5837, 6004), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((6735, 6777), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (6763, 6777), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((6800, 6967), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (6825, 6967), False, 'from lyzr.base.service import LyzrService\n'), ((7031, 7147), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (7065, 7147), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((7174, 7252), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (7201, 7252), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((7281, 7329), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (7311, 7329), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((7909, 7943), 'lyzr.utils.document_reading.read_webpage_as_documents', 'read_webpage_as_documents', ([], {'url': 'url'}), '(url=url)\n', (7934, 7943), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((8643, 8685), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (8671, 8685), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((8708, 8875), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (8733, 8875), False, 'from lyzr.base.service import LyzrService\n'), ((8939, 9055), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (8973, 9055), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((9082, 9160), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (9109, 9160), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((9189, 9237), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (9219, 9237), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((9817, 9851), 'lyzr.utils.document_reading.read_website_as_documents', 'read_website_as_documents', ([], {'url': 'url'}), '(url=url)\n', (9842, 9851), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((10551, 10593), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (10579, 10593), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((10616, 10783), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (10641, 10783), False, 'from lyzr.base.service import LyzrService\n'), ((10847, 10963), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (10881, 10963), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((10990, 11068), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (11017, 11068), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((11097, 11145), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (11127, 11145), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((11732, 11768), 'lyzr.utils.document_reading.read_youtube_as_documents', 'read_youtube_as_documents', ([], {'urls': 'urls'}), '(urls=urls)\n', (11757, 11768), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((12468, 12510), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (12496, 12510), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((12533, 12700), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (12558, 12700), False, 'from lyzr.base.service import LyzrService\n'), ((12764, 12880), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (12798, 12880), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((12907, 12985), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (12934, 12985), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((13014, 13062), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (13044, 13062), False, 'from llama_index.memory import ChatMemoryBuffer\n')] |
import json
from util import rm_file
from tqdm import tqdm
import argparse
from copy import deepcopy
import os
from util import JSONReader
import openai
from typing import List, Dict
from llama_index import (
ServiceContext,
OpenAIEmbedding,
PromptHelper,
VectorStoreIndex,
set_global_service_context
)
from llama_index.extractors import BaseExtractor
from llama_index.ingestion import IngestionPipeline
from llama_index.embeddings.cohereai import CohereEmbedding
from llama_index.llms import OpenAI
from llama_index.text_splitter import SentenceSplitter
from llama_index.embeddings import HuggingFaceEmbedding,VoyageEmbedding,InstructorEmbedding
from llama_index.postprocessor import FlagEmbeddingReranker
from llama_index.schema import QueryBundle,MetadataMode
class CustomExtractor(BaseExtractor):
async def aextract(self, nodes) -> List[Dict]:
metadata_list = [
{
"title": (
node.metadata["title"]
),
"source": (
node.metadata["source"]
),
"published_at": (
node.metadata["published_at"]
)
}
for node in nodes
]
return metadata_list
if __name__ == '__main__':
openai.api_key = os.environ.get("OPENAI_API_KEY", "your_openai_api_key")
openai.base_url = "your_api_base"
voyage_api_key = os.environ.get("VOYAGE_API_KEY", "your_voyage_api_key")
cohere_api_key = os.environ.get("COHERE_API_KEY", "your_cohere_api_key")
parser = argparse.ArgumentParser(description="running script.")
parser.add_argument('--retriever', type=str, required=True, help='retriever name')
parser.add_argument('--llm', type=str, required=False,default="gpt-3.5-turbo-1106", help='LLMs')
parser.add_argument('--rerank', action='store_true',required=False,default=False, help='if rerank')
parser.add_argument('--topk', type=int, required=False,default=10, help='Top K')
parser.add_argument('--chunk_size', type=int, required=False,default=256, help='chunk_size')
parser.add_argument('--context_window', type=int, required=False,default=2048, help='context_window')
parser.add_argument('--num_output', type=int, required=False,default=256, help='num_output')
args = parser.parse_args()
model_name = args.retriever
rerank = args.rerank
top_k = args.topk
save_model_name = model_name.split('/')
llm = OpenAI(model=args.llm, temperature=0, max_tokens=args.context_window)
# define save file
if rerank:
save_file = f'output/{save_model_name[-1]}_rerank_retrieval_test.json'
else:
save_file = f'output/{save_model_name[-1]}_retrieval_test.json'
rm_file(save_file)
print(f'save_file:{save_file}')
if 'text' in model_name:
# "text-embedding-ada-002" “text-search-ada-query-001”
embed_model = OpenAIEmbedding(model = model_name,embed_batch_size=10)
elif 'Cohere' in model_name:
embed_model = CohereEmbedding(
cohere_api_key=cohere_api_key,
model_name="embed-english-v3.0",
input_type="search_query",
)
elif 'voyage-02' in model_name:
embed_model = VoyageEmbedding(
model_name='voyage-02', voyage_api_key=voyage_api_key
)
elif 'instructor' in model_name:
embed_model = InstructorEmbedding(model_name=model_name)
else:
embed_model = HuggingFaceEmbedding(model_name=model_name, trust_remote_code=True)
# service context
text_splitter = SentenceSplitter(chunk_size=args.chunk_size)
prompt_helper = PromptHelper(
context_window=args.context_window,
num_output=args.num_output,
chunk_overlap_ratio=0.1,
chunk_size_limit=None,
)
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
text_splitter=text_splitter,
prompt_helper=prompt_helper,
)
set_global_service_context(service_context)
reader = JSONReader()
data = reader.load_data('dataset/corpus.json')
# print(data[0])
transformations = [text_splitter,CustomExtractor()]
pipeline = IngestionPipeline(transformations=transformations)
nodes = pipeline.run(documents=data)
nodes_see = deepcopy(nodes)
print(
"LLM sees:\n",
(nodes_see)[0].get_content(metadata_mode=MetadataMode.LLM),
)
print('Finish Loading...')
index = VectorStoreIndex(nodes, show_progress=True)
print('Finish Indexing...')
with open('dataset/MultiHopRAG.json', 'r') as file:
query_data = json.load(file)
if rerank:
rerank_postprocessors = FlagEmbeddingReranker(model="BAAI/bge-reranker-large", top_n=top_k)
# test retrieval quality
retrieval_save_list = []
print("start to retrieve...")
for data in tqdm(query_data):
query = data['query']
if rerank:
nodes_score = index.as_retriever(similarity_top_k=20).retrieve(query)
nodes_score = rerank_postprocessors.postprocess_nodes(
nodes_score, query_bundle=QueryBundle(query_str=query)
)
else:
nodes_score = index.as_retriever(similarity_top_k=top_k).retrieve(query)
retrieval_list = []
for ns in nodes_score:
dic = {}
dic['text'] = ns.get_content(metadata_mode=MetadataMode.LLM)
dic['score'] = ns.get_score()
retrieval_list.append(dic)
save = {}
save['query'] = data['query']
save['answer'] = data['answer']
save['question_type'] = data['question_type']
save['retrieval_list'] = retrieval_list
save['gold_list'] = data['evidence_list']
retrieval_save_list.append(save)
with open(save_file, 'w') as json_file:
json.dump(retrieval_save_list, json_file)
| [
"llama_index.OpenAIEmbedding",
"llama_index.ServiceContext.from_defaults",
"llama_index.embeddings.cohereai.CohereEmbedding",
"llama_index.VectorStoreIndex",
"llama_index.postprocessor.FlagEmbeddingReranker",
"llama_index.llms.OpenAI",
"llama_index.embeddings.VoyageEmbedding",
"llama_index.embeddings.HuggingFaceEmbedding",
"llama_index.PromptHelper",
"llama_index.text_splitter.SentenceSplitter",
"llama_index.embeddings.InstructorEmbedding",
"llama_index.set_global_service_context",
"llama_index.schema.QueryBundle",
"llama_index.ingestion.IngestionPipeline"
] | [((1340, 1395), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""', '"""your_openai_api_key"""'], {}), "('OPENAI_API_KEY', 'your_openai_api_key')\n", (1354, 1395), False, 'import os\n'), ((1455, 1510), 'os.environ.get', 'os.environ.get', (['"""VOYAGE_API_KEY"""', '"""your_voyage_api_key"""'], {}), "('VOYAGE_API_KEY', 'your_voyage_api_key')\n", (1469, 1510), False, 'import os\n'), ((1532, 1587), 'os.environ.get', 'os.environ.get', (['"""COHERE_API_KEY"""', '"""your_cohere_api_key"""'], {}), "('COHERE_API_KEY', 'your_cohere_api_key')\n", (1546, 1587), False, 'import os\n'), ((1607, 1661), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""running script."""'}), "(description='running script.')\n", (1630, 1661), False, 'import argparse\n'), ((2504, 2573), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': 'args.llm', 'temperature': '(0)', 'max_tokens': 'args.context_window'}), '(model=args.llm, temperature=0, max_tokens=args.context_window)\n', (2510, 2573), False, 'from llama_index.llms import OpenAI\n'), ((2778, 2796), 'util.rm_file', 'rm_file', (['save_file'], {}), '(save_file)\n', (2785, 2796), False, 'from util import rm_file\n'), ((3610, 3654), 'llama_index.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'args.chunk_size'}), '(chunk_size=args.chunk_size)\n', (3626, 3654), False, 'from llama_index.text_splitter import SentenceSplitter\n'), ((3675, 3803), 'llama_index.PromptHelper', 'PromptHelper', ([], {'context_window': 'args.context_window', 'num_output': 'args.num_output', 'chunk_overlap_ratio': '(0.1)', 'chunk_size_limit': 'None'}), '(context_window=args.context_window, num_output=args.num_output,\n chunk_overlap_ratio=0.1, chunk_size_limit=None)\n', (3687, 3803), False, 'from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper, VectorStoreIndex, set_global_service_context\n'), ((3861, 3985), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'text_splitter': 'text_splitter', 'prompt_helper': 'prompt_helper'}), '(llm=llm, embed_model=embed_model,\n text_splitter=text_splitter, prompt_helper=prompt_helper)\n', (3889, 3985), False, 'from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper, VectorStoreIndex, set_global_service_context\n'), ((4025, 4068), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (4051, 4068), False, 'from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper, VectorStoreIndex, set_global_service_context\n'), ((4083, 4095), 'util.JSONReader', 'JSONReader', ([], {}), '()\n', (4093, 4095), False, 'from util import JSONReader\n'), ((4250, 4300), 'llama_index.ingestion.IngestionPipeline', 'IngestionPipeline', ([], {'transformations': 'transformations'}), '(transformations=transformations)\n', (4267, 4300), False, 'from llama_index.ingestion import IngestionPipeline\n'), ((4358, 4373), 'copy.deepcopy', 'deepcopy', (['nodes'], {}), '(nodes)\n', (4366, 4373), False, 'from copy import deepcopy\n'), ((4526, 4569), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'show_progress': '(True)'}), '(nodes, show_progress=True)\n', (4542, 4569), False, 'from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper, VectorStoreIndex, set_global_service_context\n'), ((4921, 4937), 'tqdm.tqdm', 'tqdm', (['query_data'], {}), '(query_data)\n', (4925, 4937), False, 'from tqdm import tqdm\n'), ((2948, 3002), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': 'model_name', 'embed_batch_size': '(10)'}), '(model=model_name, embed_batch_size=10)\n', (2963, 3002), False, 'from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper, VectorStoreIndex, set_global_service_context\n'), ((4680, 4695), 'json.load', 'json.load', (['file'], {}), '(file)\n', (4689, 4695), False, 'import json\n'), ((4744, 4811), 'llama_index.postprocessor.FlagEmbeddingReranker', 'FlagEmbeddingReranker', ([], {'model': '"""BAAI/bge-reranker-large"""', 'top_n': 'top_k'}), "(model='BAAI/bge-reranker-large', top_n=top_k)\n", (4765, 4811), False, 'from llama_index.postprocessor import FlagEmbeddingReranker\n'), ((5926, 5967), 'json.dump', 'json.dump', (['retrieval_save_list', 'json_file'], {}), '(retrieval_save_list, json_file)\n', (5935, 5967), False, 'import json\n'), ((3059, 3170), 'llama_index.embeddings.cohereai.CohereEmbedding', 'CohereEmbedding', ([], {'cohere_api_key': 'cohere_api_key', 'model_name': '"""embed-english-v3.0"""', 'input_type': '"""search_query"""'}), "(cohere_api_key=cohere_api_key, model_name=\n 'embed-english-v3.0', input_type='search_query')\n", (3074, 3170), False, 'from llama_index.embeddings.cohereai import CohereEmbedding\n'), ((3271, 3341), 'llama_index.embeddings.VoyageEmbedding', 'VoyageEmbedding', ([], {'model_name': '"""voyage-02"""', 'voyage_api_key': 'voyage_api_key'}), "(model_name='voyage-02', voyage_api_key=voyage_api_key)\n", (3286, 3341), False, 'from llama_index.embeddings import HuggingFaceEmbedding, VoyageEmbedding, InstructorEmbedding\n'), ((3423, 3465), 'llama_index.embeddings.InstructorEmbedding', 'InstructorEmbedding', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (3442, 3465), False, 'from llama_index.embeddings import HuggingFaceEmbedding, VoyageEmbedding, InstructorEmbedding\n'), ((3498, 3565), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'model_name', 'trust_remote_code': '(True)'}), '(model_name=model_name, trust_remote_code=True)\n', (3518, 3565), False, 'from llama_index.embeddings import HuggingFaceEmbedding, VoyageEmbedding, InstructorEmbedding\n'), ((5190, 5218), 'llama_index.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (5201, 5218), False, 'from llama_index.schema import QueryBundle, MetadataMode\n')] |
import pinecone
import torch
import numpy as np
import torchvision.transforms as T
from PIL import Image
import os
import tqdm
import openai
import hashlib
import io
from gradio_client import Client
from monitor import Monitor, monitoring
from llama_index.vector_stores import PineconeVectorStore
from llama_index import VectorStoreIndex
# from llama_index.storage.storage_context import StorageContext
# from llama_index.vector_stores import PineconeVectorStore
# from llama_index.llms import OpenAI
# from llama_index import (
# VectorStoreIndex,
# SimpleWebPageReader,
# LLMPredictor,
# ServiceContext
# )
# from trulens_eval import TruLlama, Feedback, Tru, feedback
# from trulens_eval.feedback import GroundTruthAgreement, Groundedness
from pathlib import Path
from trulens_eval import Feedback, Tru, TruLlama
from trulens_eval.feedback import Groundedness
from trulens_eval.feedback.provider.openai import OpenAI
tru = Tru()
import numpy as np
# Initialize provider class
openai_tl = OpenAI()
grounded = Groundedness(groundedness_provider=OpenAI())
# Define a groundedness feedback function
f_groundedness = Feedback(grounded.groundedness_measure_with_cot_reasons).on(
TruLlama.select_source_nodes().node.text
).on_output(
).aggregate(grounded.grounded_statements_aggregator)
# Question/answer relevance between overall question and answer.
f_qa_relevance = Feedback(openai_tl.relevance).on_input_output()
# Question/statement relevance between question and each context chunk.
f_qs_relevance = Feedback(openai_tl.qs_relevance).on_input().on(
TruLlama.select_source_nodes().node.text
).aggregate(np.mean)
index_name = "medical-images"
client = Client("https://42976740ac53ddbe7d.gradio.live/")
PINECONE_API_KEY = os.getenv('PINECONE_API_KEY')
PINECONE_ENVIRONMENT = os.getenv('PINECONE_ENVIRONMENT')
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENVIRONMENT
)
index = pinecone.Index(index_name)
vector_store = PineconeVectorStore(pinecone_index=index)
l_index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
query_engine = l_index.as_query_engine()
tru_query_engine_recorder = TruLlama(query_engine,
app_id='LlamaIndex_App1',
feedbacks=[f_groundedness, f_qa_relevance, f_qs_relevance])
dinov2_vits14 = torch.hub.load("facebookresearch/dinov2", "dinov2_vits14")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dinov2_vits14.to(device)
transform_image = T.Compose([T.ToTensor(),
T.Resize(224),
T.CenterCrop(224),
T.Normalize([0.5], [0.5])])
@Monitor.monitor
def compute_embedding(file) -> dict:
"""
Create an index that contains all of the images in the specified list of files.
"""
with torch.no_grad():
embedding = dinov2_vits14(load_image(file).to(device))
print(f"embedding shape before: {embedding.shape}")
embeddings_numpy = np.array(embedding[0].cpu().numpy()).reshape(1, -1)
padded_embedding = pad_embedding(embeddings_numpy)
print(f"embedding shape after padding: {padded_embedding.shape}")
return padded_embedding
@Monitor.monitor
def load_image(file) -> torch.Tensor:
"""
Load a an image and return a tensor that can be used as an input to DINOv2.
"""
# Assuming it's PNG or JPEG
img = Image.open(file).convert("RGB")
transformed_img = transform_image(img)[:3].unsqueeze(0)
return transformed_img
@Monitor.monitor
def pad_embedding(embedding: np.ndarray, target_dim: int = 512) -> np.ndarray:
"""
Pad the given embedding with zeros to match the target dimension.
"""
original_dim = embedding.shape[1]
padding_dim = target_dim - original_dim
if padding_dim > 0:
padding = np.zeros((1, padding_dim))
padded_embedding = np.hstack([embedding, padding])
else:
padded_embedding = embedding
return padded_embedding
@Monitor.monitor
def add_embedding_to_index(id: str, embedding):
single_vector = {
'id': id,
'values': embedding.flatten().tolist(),
'metadata': {'modality': 'mri'}
}
upsert_response = index.upsert(vectors=[single_vector])
print(f"Inserted {single_vector}")
@Monitor.monitor
def img_to_vector_db(img_path, index):
embedding = compute_embedding(img_path)
add_embedding_to_index(id=str(index), embedding=embedding)
def hash_file(image_path: str) -> str:
"""
Hash the filename to create a unique ID.
"""
filename = image_path.split("/")[-1]
unique_id = hashlib.sha256(filename.encode()).hexdigest()
return unique_id
@Monitor.monitor
def retrieve(embedding):
response = index.query(
vector=embedding.flatten().tolist(),
top_k=3,
include_values=True,
include_metadata=True
)
result =[ m["metadata"]["report"] for m in response["matches"]]
urls = []
for m in response["matches"]:
if "download_path" in m["metadata"]:
urls.append(m["metadata"]["download_path"])
return result, urls
@Monitor.monitor
def generate_response(result, query, li_response):
result = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content":
"""
Objective: Generate a concise radiologic diagnosis based on SHARED FEATURES from the provided radiology reports.
Definition of SHARED FEATURES: Features that appear in more than one report. Features unique to a single report are not considered SHARED.
Instructions:
Analyze the provided radiology reports.
Identify any SHARED FEATURES, these should be the diagnosis and not radiologic features.
If SHARED FEATURES are found, provide a radiologic diagnosis in one sentence.
If no SHARED FEATURES are identified, simply state: "Radiologic Diagnosis: Diagnosis not possible."
Return the reports summarized as well.
"""
},
{"role": "assistant", "content": "Reports:"+ "\n-".join(result)},
{"role": "user", "content": query},
]
,
temperature=0)
return result
@Monitor.monitor
def llama_index_response(query, result):
from llama_index import SummaryIndex
from llama_index.schema import TextNode
index = SummaryIndex([TextNode(text=r) for r in result])
summary_query_engine = index.as_query_engine()
tru_query_engine_recorder_tmp = TruLlama(summary_query_engine,
app_id='LlamaIndex_App1',
feedbacks=[f_groundedness, f_qa_relevance, f_qs_relevance])
with tru_query_engine_recorder_tmp as recording:
li_response = summary_query_engine.query(query)
return li_response
def predict(file, query):
embedding = compute_embedding(file)
retrieved_result, urls = retrieve(embedding)
li_response = llama_index_response(query, retrieved_result)
result = generate_response(retrieved_result, query, li_response)
result = result['choices'][0]['message']['content']
result = "**Retrieved Reports:** " + ' \n'.join(retrieved_result) + " \n**Images:** " + (' \n').join(urls) + " \n **Final Diagnosis:** " + result
return result
# result = predict(img_path=img_path)
# print(f"ID: {result['matches'][0]['id']} | Similarity score: {round(result['matches'][0]['score'], 2)}")
# new_img
| [
"llama_index.schema.TextNode",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.vector_stores.PineconeVectorStore"
] | [((945, 950), 'trulens_eval.Tru', 'Tru', ([], {}), '()\n', (948, 950), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((1012, 1020), 'trulens_eval.feedback.provider.openai.OpenAI', 'OpenAI', ([], {}), '()\n', (1018, 1020), False, 'from trulens_eval.feedback.provider.openai import OpenAI\n'), ((1697, 1746), 'gradio_client.Client', 'Client', (['"""https://42976740ac53ddbe7d.gradio.live/"""'], {}), "('https://42976740ac53ddbe7d.gradio.live/')\n", (1703, 1746), False, 'from gradio_client import Client\n'), ((1766, 1795), 'os.getenv', 'os.getenv', (['"""PINECONE_API_KEY"""'], {}), "('PINECONE_API_KEY')\n", (1775, 1795), False, 'import os\n'), ((1819, 1852), 'os.getenv', 'os.getenv', (['"""PINECONE_ENVIRONMENT"""'], {}), "('PINECONE_ENVIRONMENT')\n", (1828, 1852), False, 'import os\n'), ((1854, 1927), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'PINECONE_API_KEY', 'environment': 'PINECONE_ENVIRONMENT'}), '(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)\n', (1867, 1927), False, 'import pinecone\n'), ((1947, 1973), 'pinecone.Index', 'pinecone.Index', (['index_name'], {}), '(index_name)\n', (1961, 1973), False, 'import pinecone\n'), ((1989, 2030), 'llama_index.vector_stores.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'index'}), '(pinecone_index=index)\n', (2008, 2030), False, 'from llama_index.vector_stores import PineconeVectorStore\n'), ((2041, 2102), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (2075, 2102), False, 'from llama_index import VectorStoreIndex\n'), ((2173, 2285), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': '"""LlamaIndex_App1"""', 'feedbacks': '[f_groundedness, f_qa_relevance, f_qs_relevance]'}), "(query_engine, app_id='LlamaIndex_App1', feedbacks=[f_groundedness,\n f_qa_relevance, f_qs_relevance])\n", (2181, 2285), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((2308, 2366), 'torch.hub.load', 'torch.hub.load', (['"""facebookresearch/dinov2"""', '"""dinov2_vits14"""'], {}), "('facebookresearch/dinov2', 'dinov2_vits14')\n", (2322, 2366), False, 'import torch\n'), ((6490, 6611), 'trulens_eval.TruLlama', 'TruLlama', (['summary_query_engine'], {'app_id': '"""LlamaIndex_App1"""', 'feedbacks': '[f_groundedness, f_qa_relevance, f_qs_relevance]'}), "(summary_query_engine, app_id='LlamaIndex_App1', feedbacks=[\n f_groundedness, f_qa_relevance, f_qs_relevance])\n", (6498, 6611), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((1068, 1076), 'trulens_eval.feedback.provider.openai.OpenAI', 'OpenAI', ([], {}), '()\n', (1074, 1076), False, 'from trulens_eval.feedback.provider.openai import OpenAI\n'), ((1401, 1430), 'trulens_eval.Feedback', 'Feedback', (['openai_tl.relevance'], {}), '(openai_tl.relevance)\n', (1409, 1430), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((2399, 2424), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2422, 2424), False, 'import torch\n'), ((2492, 2504), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2502, 2504), True, 'import torchvision.transforms as T\n'), ((2535, 2548), 'torchvision.transforms.Resize', 'T.Resize', (['(224)'], {}), '(224)\n', (2543, 2548), True, 'import torchvision.transforms as T\n'), ((2579, 2596), 'torchvision.transforms.CenterCrop', 'T.CenterCrop', (['(224)'], {}), '(224)\n', (2591, 2596), True, 'import torchvision.transforms as T\n'), ((2627, 2652), 'torchvision.transforms.Normalize', 'T.Normalize', (['[0.5]', '[0.5]'], {}), '([0.5], [0.5])\n', (2638, 2652), True, 'import torchvision.transforms as T\n'), ((2820, 2835), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2833, 2835), False, 'import torch\n'), ((3848, 3874), 'numpy.zeros', 'np.zeros', (['(1, padding_dim)'], {}), '((1, padding_dim))\n', (3856, 3874), True, 'import numpy as np\n'), ((3902, 3933), 'numpy.hstack', 'np.hstack', (['[embedding, padding]'], {}), '([embedding, padding])\n', (3911, 3933), True, 'import numpy as np\n'), ((3415, 3431), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (3425, 3431), False, 'from PIL import Image\n'), ((6367, 6383), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'r'}), '(text=r)\n', (6375, 6383), False, 'from llama_index.schema import TextNode\n'), ((1591, 1621), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (1619, 1621), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((1138, 1194), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {}), '(grounded.groundedness_measure_with_cot_reasons)\n', (1146, 1194), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((1539, 1571), 'trulens_eval.Feedback', 'Feedback', (['openai_tl.qs_relevance'], {}), '(openai_tl.qs_relevance)\n', (1547, 1571), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((1203, 1233), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (1231, 1233), False, 'from trulens_eval import Feedback, Tru, TruLlama\n')] |
# Copyright 2023 Qarik Group, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import threading
from datetime import datetime
from pathlib import Path
from typing import Any, List
from common import admin_dao, constants, gcs_tools, solution
from common.cache import cache
from common.log import Logger, log, log_params
from langchain.llms.openai import OpenAIChat
from llama_index import (Document, GPTSimpleKeywordTableIndex, GPTVectorStoreIndex, LLMPredictor, ServiceContext,
SimpleDirectoryReader, StorageContext, load_index_from_storage)
from llama_index.indices.composability import ComposableGraph
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.query.query_transform.base import DecomposeQueryTransform
# import google.generativeai as palm
# from llama_index.query_engine.router_query_engine import RouterQueryEngine
from llama_index.query_engine.transform_query_engine import TransformQueryEngine
# from llama_index.selectors.llm_selectors import LLMSingleSelector
# from llama_index.tools.query_engine import QueryEngineTool
logger = Logger(__name__).get_logger()
logger.info('Initializing...')
DATA_LOAD_LOCK = threading.Lock()
"""Block many concurrent data loads at once."""
LLAMA_FILE_LOCK = threading.Lock()
"""Lock to prevent concurrent updates of the same index - needed in case we have more than one request processing."""
INDEX_BUCKET: str = solution.getenv('EMBEDDINGS_BUCKET_NAME')
"""Location to download llama-index embeddings from."""
LAST_LOCAL_INDEX_UPDATE: datetime | None = None
"""Keep track of the most recent local index update to avoid unnecessary refreshes."""
if solution.LOCAL_DEVELOPMENT_MODE:
LLAMA_INDEX_DIR: str = 'dev/tmp/llamaindex-embeddings'
else:
LLAMA_INDEX_DIR = 'tmp/llamaindex-embeddings'
LOCAL_DEV_DATA_DIR: str = 'dev/tmp'
"""Location of the local data directory for development on local machine."""
@log
def _get_llm(provider: constants.LlmProvider) -> LLMPredictor:
"""Return LLM predictor."""
if provider == constants.LlmProvider.OPEN_AI:
llm = LLMPredictor(llm=OpenAIChat(temperature=constants.TEMPERATURE, model_name=constants.GPT_MODEL)) # type: ignore
else:
raise ValueError(f'Unknown LLM provider: {provider}')
return llm
@log_params
def load_resumes(resume_dir: str | None) -> dict[str, List[Document]]:
"""Initialize list of resumes from index storage or from the directory with PDF source files."""
resumes: dict[str, List[Document]] = {}
if resume_dir is None:
resume_dir = ''
resume_path = Path(resume_dir)
index_path = Path(LLAMA_INDEX_DIR)
global DATA_LOAD_LOCK
with DATA_LOAD_LOCK:
if index_path.exists():
logger.info('Loading people names (not resumes) from existing index storage...')
names = glob.glob(f'{index_path}/*',)
if len(names):
for file_name in names:
# We do not care about the contents of the resume because it will be loaded from index
# All we care for here is the name - aka the Key, not Value
resumes[Path(file_name).name] = []
return resumes
else:
logger.warning('No resumes found in the index directory: %s', index_path)
logger.warning('Removing the index storage directory: %s', index_path)
Path.rmdir(index_path)
logger.info('Loading people names from the source dir with resume PDF files...')
Path.mkdir(resume_path, parents=True, exist_ok=True)
# Check if there are any pdf files in the data directory
pdf_files = glob.glob(f'{resume_path}/*.pdf')
if len(pdf_files):
# Each resume shall be named as '<person_name>.pdf' optionally with 'resume' suffix
for resume in pdf_files:
person_name = os.path.basename(resume).replace('.pdf', '').replace(
'Resume', '').replace('resume', '').replace('_', ' ').strip()
logger.debug(f'Loading: {person_name}')
resume_content = SimpleDirectoryReader(input_files=[resume]).load_data()
resumes[person_name] = resume_content
else:
logger.warning('No resume PDF files found in the data directory: %s', resume_path)
return resumes
@log
def _load_resume_indices(resumes: dict[str, List[Document]],
service_context: ServiceContext, embeddings_dir: str) -> dict[str, GPTVectorStoreIndex]:
"""Load or create index storage contexts for each person in the resumes list."""
vector_indices = {}
for person_name, resume_data in resumes.items():
cache_file_path = Path(f'./{embeddings_dir}/{person_name}')
if cache_file_path.exists():
logger.debug('Loading index from storage file: %s', cache_file_path)
storage_context = StorageContext.from_defaults(persist_dir=str(cache_file_path))
vector_indices[person_name] = load_index_from_storage(storage_context=storage_context)
else:
storage_context = StorageContext.from_defaults()
# build vector index
vector_indices[person_name] = GPTVectorStoreIndex.from_documents(
resume_data,
service_context=service_context,
storage_context=storage_context,
)
# set id for vector index
# vector_indices[person_name].index_struct.index_id = person_name
vector_indices[person_name].set_index_id(person_name)
logger.debug('Saving index to storage file: %s', cache_file_path)
storage_context.persist(persist_dir=str(cache_file_path))
# ------------------- Test
# name = 'Roman Kharkovski'
# test_query = f'What are the main skills for {name}?'
# logger.debug('Test query: %s', test_query)
# response = vector_indices[f'{name}'].as_query_engine().query(test_query)
# logger.debug('Response: %s', str(response))
# exit(0)
# ------------------- end of test
return vector_indices # type: ignore
@log
def _load_resume_index_summary(resumes: dict[str, Any]) -> dict[str, str]:
index_summaries = {}
for person_name in resumes.keys():
# index_summaries[person_name] = (f'Use this index if you need to lookup specific facts about {person_name}.')
index_summaries[person_name] = (f'This content contains resume of {person_name}.\n'
f'Use this index if you need to lookup specific facts about {person_name}.\n'
'Do not confuse people with the same lastname, but different first names.'
'If you cant find the answer, respond with the best of your knowledge.'
'Do not use this index if you want to analyze multiple people.')
return index_summaries
@log_params
def generate_embeddings(resume_dir: str, provider: constants.LlmProvider) -> None:
"""Generate embeddings from PDF resumes."""
resumes = load_resumes(resume_dir=resume_dir)
if not resumes:
return None
predictor = _get_llm(provider=provider)
context = ServiceContext.from_defaults(llm_predictor=predictor, chunk_size_limit=constants.CHUNK_SIZE)
_load_resume_indices(resumes=resumes, service_context=context, embeddings_dir=LLAMA_INDEX_DIR)
@log_params
def _get_resume_query_engine(provider: constants.LlmProvider, resume_dir: str | None = None) -> BaseQueryEngine | None:
"""Load the index from disk, or build it if it doesn't exist."""
llm = _get_llm(provider=provider)
service_context = ServiceContext.from_defaults(llm_predictor=llm, chunk_size_limit=constants.CHUNK_SIZE)
resumes: dict[str, List[Document]] = load_resumes(resume_dir=resume_dir)
logger.debug('-------------------------- resumes: %s', resumes.keys())
if not resumes:
return None
# vector_indices = load_resume_indices(resumes, service_context)
vector_indices = _load_resume_indices(resumes=resumes, service_context=service_context,
embeddings_dir=LLAMA_INDEX_DIR)
index_summaries = _load_resume_index_summary(resumes)
graph = ComposableGraph.from_indices(root_index_cls=GPTSimpleKeywordTableIndex,
children_indices=[index for _, index in vector_indices.items()],
index_summaries=[summary for _, summary in index_summaries.items()],
max_keywords_per_chunk=constants.MAX_KEYWORDS_PER_CHUNK)
# root_index = graph.get_index(graph.root_id)
root_index = graph.get_index(index_struct_id=graph.root_id)
root_index.set_index_id('compare_contrast')
graph.index_struct.summary = ('This index contains resumes of multiple people. '
'Do not confuse people with the same lastname, but different first names.'
'Use this index if you want to compare multiple people.')
decompose_transform = DecomposeQueryTransform(llm, verbose=True)
custom_query_engines = {}
for index in vector_indices.values():
query_engine = index.as_query_engine(service_context=service_context,
similarity_top_k=constants.SIMILARITY_TOP_K)
query_engine = TransformQueryEngine(query_engine=query_engine,
query_transform=decompose_transform,
transform_metadata={'index_summary': index.index_struct.summary},
) # type: ignore
custom_query_engines[index.index_id] = query_engine
custom_query_engines[graph.root_id] = graph.root_index.as_query_engine(
retriever_mode='simple',
response_mode='tree_summarize',
service_context=service_context,
verbose=True,
use_async=True,
)
graph_query_engine = graph.as_query_engine(custom_query_engines=custom_query_engines)
# ------------------- Test
# name1 = 'Roman Kharkovski'
# name2 = 'Steven Kim'
# response = graph_query_engine.query(f'Compare and contrast the skills of {name1} and {name2}.')
# logger.debug('Response: %s', str(response))
# ------------------- end of test
return graph_query_engine
# TODO: the query engine tool does not longer work - need to debug
# query_engine_tools = []
# # add vector index tools
# for person_name in resumes.keys():
# index = vector_indices[person_name]
# summary = index_summaries[person_name]
# query_engine = index.as_query_engine(service_context=service_context)
# vector_tool = QueryEngineTool.from_defaults(query_engine=query_engine, description=summary)
# query_engine_tools.append(vector_tool)
# # add graph tool
# graph_tool = QueryEngineTool.from_defaults(graph_query_engine, description=graph.index_struct.summary)
# query_engine_tools.append(graph_tool)
# router_query_engine = RouterQueryEngine.from_defaults(selector=LLMSingleSelector.from_defaults(
# service_context=service_context), query_engine_tools=query_engine_tools)
# return router_query_engine
@cache
@log
def _refresh_llama_index() -> None:
"""Refresh the index of resumes from the database using Llama-Index."""
global LAST_LOCAL_INDEX_UPDATE
if solution.LOCAL_DEVELOPMENT_MODE:
logger.info('Running in local development mode')
index_path = Path(LLAMA_INDEX_DIR)
if not index_path.exists():
# TODO - need to generate proper embeddings for each provider, not hard coded
generate_embeddings(resume_dir=LOCAL_DEV_DATA_DIR, provider=constants.LlmProvider.OPEN_AI)
return
global LLAMA_FILE_LOCK
last_resume_refresh = admin_dao.AdminDAO().get_resumes_timestamp()
if LAST_LOCAL_INDEX_UPDATE is None or LAST_LOCAL_INDEX_UPDATE < last_resume_refresh:
logger.info('Refreshing local index of resumes...')
# Prevent concurrent updates of the same index - needed in case we have more than one request processing
with LLAMA_FILE_LOCK:
# Check for condition again because the index may have been updated while we were waiting for the lock
if LAST_LOCAL_INDEX_UPDATE is None or LAST_LOCAL_INDEX_UPDATE < last_resume_refresh:
gcs_tools.download(bucket_name=INDEX_BUCKET, local_dir=LLAMA_INDEX_DIR)
return last_resume_refresh
logger.info('Skipping refresh of resumes index because no changes in source resumes were detected.')
LAST_LOCAL_INDEX_UPDATE = last_resume_refresh
@log
def query(question: str) -> str:
"""Run LLM query for CHatGPT."""
_refresh_llama_index()
query_engine = _get_resume_query_engine(provider=constants.LlmProvider.OPEN_AI)
if query_engine is None:
raise SystemError('No resumes found in the database. Please upload resumes.')
return str(query_engine.query(question))
| [
"llama_index.query_engine.transform_query_engine.TransformQueryEngine",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.indices.query.query_transform.base.DecomposeQueryTransform",
"llama_index.load_index_from_storage"
] | [((1710, 1726), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1724, 1726), False, 'import threading\n'), ((1794, 1810), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1808, 1810), False, 'import threading\n'), ((1950, 1991), 'common.solution.getenv', 'solution.getenv', (['"""EMBEDDINGS_BUCKET_NAME"""'], {}), "('EMBEDDINGS_BUCKET_NAME')\n", (1965, 1991), False, 'from common import admin_dao, constants, gcs_tools, solution\n'), ((3114, 3130), 'pathlib.Path', 'Path', (['resume_dir'], {}), '(resume_dir)\n', (3118, 3130), False, 'from pathlib import Path\n'), ((3148, 3169), 'pathlib.Path', 'Path', (['LLAMA_INDEX_DIR'], {}), '(LLAMA_INDEX_DIR)\n', (3152, 3169), False, 'from pathlib import Path\n'), ((7800, 7897), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'predictor', 'chunk_size_limit': 'constants.CHUNK_SIZE'}), '(llm_predictor=predictor, chunk_size_limit=\n constants.CHUNK_SIZE)\n', (7828, 7897), False, 'from llama_index import Document, GPTSimpleKeywordTableIndex, GPTVectorStoreIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((8255, 8346), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm', 'chunk_size_limit': 'constants.CHUNK_SIZE'}), '(llm_predictor=llm, chunk_size_limit=constants.\n CHUNK_SIZE)\n', (8283, 8346), False, 'from llama_index import Document, GPTSimpleKeywordTableIndex, GPTVectorStoreIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((9702, 9744), 'llama_index.indices.query.query_transform.base.DecomposeQueryTransform', 'DecomposeQueryTransform', (['llm'], {'verbose': '(True)'}), '(llm, verbose=True)\n', (9725, 9744), False, 'from llama_index.indices.query.query_transform.base import DecomposeQueryTransform\n'), ((1631, 1647), 'common.log.Logger', 'Logger', (['__name__'], {}), '(__name__)\n', (1637, 1647), False, 'from common.log import Logger, log, log_params\n'), ((4069, 4121), 'pathlib.Path.mkdir', 'Path.mkdir', (['resume_path'], {'parents': '(True)', 'exist_ok': '(True)'}), '(resume_path, parents=True, exist_ok=True)\n', (4079, 4121), False, 'from pathlib import Path\n'), ((4208, 4241), 'glob.glob', 'glob.glob', (['f"""{resume_path}/*.pdf"""'], {}), "(f'{resume_path}/*.pdf')\n", (4217, 4241), False, 'import glob\n'), ((5266, 5307), 'pathlib.Path', 'Path', (['f"""./{embeddings_dir}/{person_name}"""'], {}), "(f'./{embeddings_dir}/{person_name}')\n", (5270, 5307), False, 'from pathlib import Path\n'), ((10009, 10169), 'llama_index.query_engine.transform_query_engine.TransformQueryEngine', 'TransformQueryEngine', ([], {'query_engine': 'query_engine', 'query_transform': 'decompose_transform', 'transform_metadata': "{'index_summary': index.index_struct.summary}"}), "(query_engine=query_engine, query_transform=\n decompose_transform, transform_metadata={'index_summary': index.\n index_struct.summary})\n", (10029, 10169), False, 'from llama_index.query_engine.transform_query_engine import TransformQueryEngine\n'), ((12194, 12215), 'pathlib.Path', 'Path', (['LLAMA_INDEX_DIR'], {}), '(LLAMA_INDEX_DIR)\n', (12198, 12215), False, 'from pathlib import Path\n'), ((3366, 3394), 'glob.glob', 'glob.glob', (['f"""{index_path}/*"""'], {}), "(f'{index_path}/*')\n", (3375, 3394), False, 'import glob\n'), ((5561, 5617), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context'}), '(storage_context=storage_context)\n', (5584, 5617), False, 'from llama_index import Document, GPTSimpleKeywordTableIndex, GPTVectorStoreIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((5662, 5692), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (5690, 5692), False, 'from llama_index import Document, GPTSimpleKeywordTableIndex, GPTVectorStoreIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((5768, 5886), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['resume_data'], {'service_context': 'service_context', 'storage_context': 'storage_context'}), '(resume_data, service_context=\n service_context, storage_context=storage_context)\n', (5802, 5886), False, 'from llama_index import Document, GPTSimpleKeywordTableIndex, GPTVectorStoreIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((12514, 12534), 'common.admin_dao.AdminDAO', 'admin_dao.AdminDAO', ([], {}), '()\n', (12532, 12534), False, 'from common import admin_dao, constants, gcs_tools, solution\n'), ((2633, 2710), 'langchain.llms.openai.OpenAIChat', 'OpenAIChat', ([], {'temperature': 'constants.TEMPERATURE', 'model_name': 'constants.GPT_MODEL'}), '(temperature=constants.TEMPERATURE, model_name=constants.GPT_MODEL)\n', (2643, 2710), False, 'from langchain.llms.openai import OpenAIChat\n'), ((3948, 3970), 'pathlib.Path.rmdir', 'Path.rmdir', (['index_path'], {}), '(index_path)\n', (3958, 3970), False, 'from pathlib import Path\n'), ((13079, 13150), 'common.gcs_tools.download', 'gcs_tools.download', ([], {'bucket_name': 'INDEX_BUCKET', 'local_dir': 'LLAMA_INDEX_DIR'}), '(bucket_name=INDEX_BUCKET, local_dir=LLAMA_INDEX_DIR)\n', (13097, 13150), False, 'from common import admin_dao, constants, gcs_tools, solution\n'), ((4658, 4701), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[resume]'}), '(input_files=[resume])\n', (4679, 4701), False, 'from llama_index import Document, GPTSimpleKeywordTableIndex, GPTVectorStoreIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((3679, 3694), 'pathlib.Path', 'Path', (['file_name'], {}), '(file_name)\n', (3683, 3694), False, 'from pathlib import Path\n'), ((4433, 4457), 'os.path.basename', 'os.path.basename', (['resume'], {}), '(resume)\n', (4449, 4457), False, 'import os\n')] |
# The MIT License
# Copyright (c) Jerry Liu
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""OpenDAL file and directory reader.
A loader that fetches a file or iterates through a directory on a object store like AWS S3 or AzureBlob.
"""
import asyncio
import logging as log
import tempfile
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Self, Type, Union, cast
import opendal
from llama_index.readers.base import BaseReader
from llama_index.readers.file.docs_reader import DocxReader, PDFReader
from llama_index.readers.file.epub_reader import EpubReader
from llama_index.readers.file.image_reader import ImageReader
from llama_index.readers.file.ipynb_reader import IPYNBReader
from llama_index.readers.file.markdown_reader import MarkdownReader
from llama_index.readers.file.mbox_reader import MboxReader
from llama_index.readers.file.slides_reader import PptxReader
from llama_index.readers.file.tabular_reader import PandasCSVReader
from llama_index.readers.file.video_audio_reader import VideoAudioReader
from llama_index.schema import Document
from .... import services
from ....domain import DocumentListItem
DEFAULT_FILE_READER_CLS: Dict[str, Type[BaseReader]] = {
".pdf": PDFReader,
".docx": DocxReader,
".pptx": PptxReader,
".jpg": ImageReader,
".png": ImageReader,
".jpeg": ImageReader,
".mp3": VideoAudioReader,
".mp4": VideoAudioReader,
".csv": PandasCSVReader,
".epub": EpubReader,
".md": MarkdownReader,
".mbox": MboxReader,
".ipynb": IPYNBReader,
}
FILE_MIME_EXTENSION_MAP: Dict[str, str] = {
"application/pdf": ".pdf",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx",
"application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx",
"application/vnd.google-apps.document": ".gdoc",
"application/vnd.google-apps.presentation": ".gslides",
"application/vnd.google-apps.spreadsheet": ".gsheet",
"image/jpeg": ".jpg",
"image/png": ".png",
"image/jpg": ".jpg",
"audio/mpeg": ".mp3",
"audio/mp3": ".mp3",
"video/mp4": ".mp4",
"video/mpeg": ".mp4",
"text/csv": ".csv",
"application/epub+zip": ".epub",
"text/markdown": ".md",
"application/x-ipynb+json": ".ipynb",
"application/mbox": ".mbox",
}
class OpendalReader(BaseReader):
"""General reader for any opendal operator."""
def __init__(
self: Self,
scheme: str,
path: str = "/",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
file_metadata: Optional[Callable[[str], Dict]] = None,
**kwargs: Optional[dict[str, Any]],
) -> None:
"""Initialize opendal operator, along with credentials if needed.
Args:
scheme (str): the scheme of the service
path (str): the path of the data. If none is provided,
this loader will iterate through the entire bucket. If path is endswith `/`, this loader will iterate through the entire dir. Otherwise, this loader will load the file.
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. NOTE: this isn't implemented yet.
file_metadata (Optional[Callable[[str], Dict]]): A function that takes a source file path and returns a dictionary of metadata to be added to the Document object.
**kwargs (Optional dict[str, any]): Additional arguments to pass to the `opendal.AsyncOperator` constructor. These are the scheme (object store) specific options.
"""
super().__init__()
self.path = path
self.file_metadata = file_metadata
self.supported_suffix = list(DEFAULT_FILE_READER_CLS.keys())
self.async_op = opendal.AsyncOperator(scheme, **kwargs)
if file_extractor is not None:
self.file_extractor = file_extractor
else:
self.file_extractor = {}
self.documents: List[Document] = []
def load_data(self: Self) -> List[Document]:
"""Load file(s) from OpenDAL."""
# TODO: think about the private and secure aspect of this temp folder.
# NOTE: the following code cleans up the temp folder when existing the context.
with tempfile.TemporaryDirectory() as temp_dir:
if not self.path.endswith("/"):
result = asyncio.run(
download_file_from_opendal(self.async_op, temp_dir, self.path, file_metadata=self.file_metadata)
)
self.downloaded_files.append(result)
else:
self.downloaded_files = asyncio.run(download_dir_from_opendal(self.async_op, temp_dir, self.path))
self.documents = asyncio.run(
extract_files(
self.downloaded_files, file_extractor=self.file_extractor, file_metadata=self.file_metadata
)
)
return self.documents
def get_document_list(self: Self) -> List[DocumentListItem]:
"""Get a list of all documents in the index. A document is a list are 1:1 with a file."""
dl: List[DocumentListItem] = []
try:
for df in self.downloaded_files:
dl.append(DocumentListItem(link=df[0], indexed_on=df[2], size=df[3]))
except Exception as e:
log.exception("Converting Document list to DocumentListItem list failed: %s", e)
return dl
class FileStorageBaseReader(BaseReader):
"""File storage reader."""
def __init__(
self: Self,
access_token: dict,
root: str,
selected_folder_id: Optional[str] = None,
path: str = "/",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
file_metadata: Optional[Callable[[str], Dict]] = None,
**kwargs: Optional[dict[str, Any]],
) -> None:
"""Initialize File storage service reader.
Args:
path (str): the path of the data. If none is provided,
this loader will iterate through the entire bucket. If path is endswith `/`, this loader will iterate through the entire dir. Otherwise, this loader will load the file.
access_token (dict): the access token for the google drive service
root (str): the root folder to start the iteration
selected_folder_id (Optional[str] = None): the selected folder id
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. NOTE: this isn't implemented yet.
file_metadata (Optional[Callable[[str], Dict]]): A function that takes a source file path and returns a dictionary of metadata to be added to the Document object.
kwargs (Optional dict[str, any]): Additional arguments to pass to the specific file storage service.
"""
super().__init__()
self.path = path
self.file_extractor = file_extractor if file_extractor is not None else {}
self.supported_suffix = list(DEFAULT_FILE_READER_CLS.keys())
self.access_token = access_token
self.root = root
self.file_metadata = file_metadata
self.selected_folder_id = selected_folder_id
self.documents: List[Document] = []
self.kwargs = kwargs
self.downloaded_files: List[tuple[str, str, int, int]] = []
def load_data(self: Self) -> List[Document]:
"""Load file(s) from file storage."""
raise NotImplementedError
def get_document_list(self: Self) -> List[DocumentListItem]:
"""Get a list of all documents in the index. A document is a list are 1:1 with a file."""
dl: List[DocumentListItem] = []
try:
for df in self.downloaded_files:
dl.append(DocumentListItem(link=df[0], indexed_on=df[2], size=df[3]))
except Exception as e:
log.exception("Converting Document list to DocumentListItem list failed: %s", e)
return dl
# TODO: Tobe removed once opendal starts supporting Google Drive.
class GoogleDriveReader(FileStorageBaseReader):
"""Google Drive reader."""
def __init__(
self: Self,
access_token: dict,
root: str,
selected_folder_id: Optional[str] = None,
path: str = "/",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
file_metadata: Optional[Callable[[str], Dict]] = None,
) -> None:
"""Initialize Google Drive reader."""
super().__init__(
access_token=access_token,
root=root,
selected_folder_id=selected_folder_id,
path=path,
file_extractor=file_extractor,
file_metadata=file_metadata,
)
def load_data(self: Self) -> List[Document]:
"""Load file(s) from Google Drive."""
service = services.google_drive.get_drive_service(self.access_token)
id_ = self.selected_folder_id if self.selected_folder_id is not None else "root"
folder_content = service.files().list(
q=f"'{id_}' in parents and trashed=false",
fields="files(id, name, parents, mimeType, modifiedTime, webViewLink, webContentLink, size, fullFileExtension)",
).execute()
files = folder_content.get("files", [])
with tempfile.TemporaryDirectory() as temp_dir:
self.downloaded_files = asyncio.run(
download_from_gdrive(files, temp_dir, service)
)
self.documents = asyncio.run(
extract_files(
self.downloaded_files, file_extractor=self.file_extractor, file_metadata=self.file_metadata
)
)
return self.documents
class OneDriveReader(FileStorageBaseReader):
"""OneDrive reader."""
def __init__(
self: Self,
access_token: dict,
root: str,
selected_folder_id: Optional[str] = None,
path: str = "/",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
file_metadata: Optional[Callable[[str], Dict]] = None,
) -> None:
"""Initialize OneDrive reader."""
super().__init__(
access_token=access_token,
root=root,
selected_folder_id=selected_folder_id,
path=path,
file_extractor=file_extractor,
file_metadata=file_metadata,
)
def load_data(self: Self) -> List[Document]:
"""Load file(s) from OneDrive."""
client = services.ms_onedrive.get_client(self.access_token)
id_ = self.selected_folder_id if self.selected_folder_id is not None else "/drive/root:"
if client is not None:
response = client.files.drive_specific_folder(id_, {
"$select": "id,name,file,size,webUrl",
"$filter": "file ne null",
"$top": 100, # Limiting to a maximum of 100 files for now.
})
files = response.data.get("value", [])
with tempfile.TemporaryDirectory() as temp_dir:
self.downloaded_files = asyncio.run(
download_from_onedrive(files, temp_dir, client)
)
self.documents = asyncio.run(
extract_files(
self.downloaded_files, file_extractor=self.file_extractor, file_metadata=self.file_metadata
)
)
return self.documents
async def download_from_onedrive(files: List[dict], temp_dir: str, client: Any,) -> List[tuple[str, str, int, int]]:
"""Download files from OneDrive."""
downloaded_files: List[tuple[str, str, int, int]] = []
for file in files:
suffix = Path(file["name"]).suffix
if suffix not in DEFAULT_FILE_READER_CLS:
log.debug("file suffix not supported: %s", suffix)
continue
file_path = f"{temp_dir}/{file['name']}"
indexed_on = datetime.timestamp(datetime.now().utcnow())
await asyncio.to_thread(
services.ms_onedrive.download_file, client, file["id"], file_path
)
downloaded_files.append(
(file["webUrl"], file_path, int(indexed_on), int(file["size"]))
)
return downloaded_files
async def download_from_gdrive(files: List[dict], temp_dir: str, service: Any,) -> List[tuple[str, str, int, int]]:
"""Download files from Google Drive."""
downloaded_files: List[tuple[str, str, int, int]] = []
for file in files:
if file["mimeType"] == "application/vnd.google-apps.folder":
# TODO: Implement recursive folder download
continue
suffix = FILE_MIME_EXTENSION_MAP.get(file["mimeType"], None)
if suffix not in DEFAULT_FILE_READER_CLS:
continue
file_path = f"{temp_dir}/{file['name']}"
indexed_on = datetime.timestamp(datetime.now().utcnow())
await asyncio.to_thread(
services.google_drive.download_file, service, file["id"], file_path, file["mimeType"]
)
downloaded_files.append(
(file["webViewLink"], file_path, int(indexed_on), int(file["size"]))
)
return downloaded_files
async def download_file_from_opendal(op: Any, temp_dir: str, path: str) -> tuple[str, int, int]:
"""Download file from OpenDAL."""
import opendal
log.debug("downloading file using OpenDAL: %s", path)
op = cast(opendal.AsyncOperator, op)
suffix = Path(path).suffix
filepath = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}" # type: ignore
file_size = 0
indexed_on = datetime.timestamp(datetime.now().utcnow())
async with op.open_reader(path) as r:
with open(filepath, "wb") as w:
b = await r.read()
w.write(b)
file_size = len(b)
return (filepath, int(indexed_on), file_size)
async def download_dir_from_opendal(
op: Any,
temp_dir: str,
download_dir: str,
) -> List[tuple[str, str, int, int]]:
"""Download directory from opendal.
Args:
op: opendal operator
temp_dir: temp directory to store the downloaded files
download_dir: directory to download
supported_suffix: list of supported file suffixes
file_extractor: A mapping of file extractors to use for specific file types.
file_metadata: A function that takes a file path and returns a dictionary of metadata to be added to the Document object.
Returns:
a list of tuples of 'source path' and 'local path'.
"""
import opendal
log.debug("downloading dir using OpenDAL: %s", download_dir)
downloaded_files: List[tuple[str, str, int, int]] = []
op = cast(opendal.AsyncOperator, op)
objs = await op.scan(download_dir)
async for obj in objs:
filepath, indexed_on, size = await download_file_from_opendal(op, temp_dir, obj.path)
downloaded_files.append((obj.path, filepath, indexed_on, size)) # source path, local path
return downloaded_files
async def extract_files(
downloaded_files: List[tuple[str, str, int, int]],
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
file_metadata: Optional[Callable[[str], Dict]] = None,
) -> List[Document]:
"""Extract content of a list of files."""
documents: List[Document] = []
tasks = []
log.debug("number files to extract: %s", len(downloaded_files))
for fe in downloaded_files:
source_path = fe[0]
local_path = fe[1]
metadata = None
if file_metadata is not None:
metadata = file_metadata(source_path)
# TODO: this likely will not scale very much. We'll have to refactor to control the number of tasks.
task = asyncio.create_task(
extract_file(Path(local_path), filename_as_id=True, file_extractor=file_extractor, metadata=metadata)
)
tasks.append(task)
log.debug("extract task created for: %s", local_path)
log.debug("extract file - tasks started: %s", len(tasks))
results = await asyncio.gather(*tasks)
log.debug("extract file - tasks completed: %s", len(results))
for result in results:
# combine into a single Document list
documents.extend(result)
return documents
async def extract_file(
file_path: Path,
filename_as_id: bool = False,
errors: str = "ignore",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
metadata: Optional[Dict] = None,
) -> List[Document]:
"""Extract content of a file on disk.
Args:
file_path (str): path to the file
filename_as_id (bool): whether to use the filename as the document id
errors (str): how to handle errors when reading the file
supported_suffix (Optional[List[str]]): list of supported file suffixes
file_extractor (Optional[Dict[str, Union[str, BaseReader]]] = None): A mapping of file extractors to use for specific file types.
metadata (Optional[Dict] = None): metadata to add to the document. This will be appended to any metadata generated by the file extension specific extractor.
Returns:
List[Document]: list of documents containing the content of the file, one Document object per page.
"""
documents: List[Document] = []
file_suffix = file_path.suffix.lower()
supported_suffix = list(DEFAULT_FILE_READER_CLS.keys())
if file_suffix in supported_suffix:
log.debug("file extractor found for file_suffix: %s", file_suffix)
# NOTE: pondering if its worth turning this into a class and uncomment the code above so reader classes are only instantiated once.
reader = DEFAULT_FILE_READER_CLS[file_suffix]()
docs = reader.load_data(file_path, extra_info=metadata)
# iterate over docs if needed
if filename_as_id:
for i, doc in enumerate(docs):
doc.id_ = f"{str(file_path)}_part_{i}"
documents.extend(docs)
else:
log.debug("file extractor not found for file_suffix: %s", file_suffix)
# do standard read
with open(file_path, "r", errors=errors, encoding="utf8") as f:
data = f.read()
doc = Document(text=data, extra_info=metadata or {})
if filename_as_id:
doc.id_ = str(file_path)
documents.append(doc)
return documents
| [
"llama_index.schema.Document"
] | [((14702, 14755), 'logging.debug', 'log.debug', (['"""downloading file using OpenDAL: %s"""', 'path'], {}), "('downloading file using OpenDAL: %s', path)\n", (14711, 14755), True, 'import logging as log\n'), ((14765, 14796), 'typing.cast', 'cast', (['opendal.AsyncOperator', 'op'], {}), '(opendal.AsyncOperator, op)\n', (14769, 14796), False, 'from typing import Any, Callable, Dict, List, Optional, Self, Type, Union, cast\n'), ((15914, 15974), 'logging.debug', 'log.debug', (['"""downloading dir using OpenDAL: %s"""', 'download_dir'], {}), "('downloading dir using OpenDAL: %s', download_dir)\n", (15923, 15974), True, 'import logging as log\n'), ((16043, 16074), 'typing.cast', 'cast', (['opendal.AsyncOperator', 'op'], {}), '(opendal.AsyncOperator, op)\n', (16047, 16074), False, 'from typing import Any, Callable, Dict, List, Optional, Self, Type, Union, cast\n'), ((4988, 5027), 'opendal.AsyncOperator', 'opendal.AsyncOperator', (['scheme'], {}), '(scheme, **kwargs)\n', (5009, 5027), False, 'import opendal\n'), ((14811, 14821), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (14815, 14821), False, 'from pathlib import Path\n'), ((17265, 17318), 'logging.debug', 'log.debug', (['"""extract task created for: %s"""', 'local_path'], {}), "('extract task created for: %s', local_path)\n", (17274, 17318), True, 'import logging as log\n'), ((17403, 17425), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (17417, 17425), False, 'import asyncio\n'), ((18800, 18866), 'logging.debug', 'log.debug', (['"""file extractor found for file_suffix: %s"""', 'file_suffix'], {}), "('file extractor found for file_suffix: %s', file_suffix)\n", (18809, 18866), True, 'import logging as log\n'), ((19342, 19412), 'logging.debug', 'log.debug', (['"""file extractor not found for file_suffix: %s"""', 'file_suffix'], {}), "('file extractor not found for file_suffix: %s', file_suffix)\n", (19351, 19412), True, 'import logging as log\n'), ((19555, 19601), 'llama_index.schema.Document', 'Document', ([], {'text': 'data', 'extra_info': '(metadata or {})'}), '(text=data, extra_info=metadata or {})\n', (19563, 19601), False, 'from llama_index.schema import Document\n'), ((5485, 5514), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (5512, 5514), False, 'import tempfile\n'), ((10635, 10664), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (10662, 10664), False, 'import tempfile\n'), ((13058, 13076), 'pathlib.Path', 'Path', (["file['name']"], {}), "(file['name'])\n", (13062, 13076), False, 'from pathlib import Path\n'), ((13146, 13196), 'logging.debug', 'log.debug', (['"""file suffix not supported: %s"""', 'suffix'], {}), "('file suffix not supported: %s', suffix)\n", (13155, 13196), True, 'import logging as log\n'), ((13346, 13434), 'asyncio.to_thread', 'asyncio.to_thread', (['services.ms_onedrive.download_file', 'client', "file['id']", 'file_path'], {}), "(services.ms_onedrive.download_file, client, file['id'],\n file_path)\n", (13363, 13434), False, 'import asyncio\n'), ((14261, 14369), 'asyncio.to_thread', 'asyncio.to_thread', (['services.google_drive.download_file', 'service', "file['id']", 'file_path', "file['mimeType']"], {}), "(services.google_drive.download_file, service, file['id'],\n file_path, file['mimeType'])\n", (14278, 14369), False, 'import asyncio\n'), ((6571, 6656), 'logging.exception', 'log.exception', (['"""Converting Document list to DocumentListItem list failed: %s"""', 'e'], {}), "('Converting Document list to DocumentListItem list failed: %s', e\n )\n", (6584, 6656), True, 'import logging as log\n'), ((9201, 9286), 'logging.exception', 'log.exception', (['"""Converting Document list to DocumentListItem list failed: %s"""', 'e'], {}), "('Converting Document list to DocumentListItem list failed: %s', e\n )\n", (9214, 9286), True, 'import logging as log\n'), ((12349, 12378), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (12376, 12378), False, 'import tempfile\n'), ((14863, 14894), 'tempfile._get_candidate_names', 'tempfile._get_candidate_names', ([], {}), '()\n', (14892, 14894), False, 'import tempfile\n'), ((14975, 14989), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14987, 14989), False, 'from datetime import datetime\n'), ((17131, 17147), 'pathlib.Path', 'Path', (['local_path'], {}), '(local_path)\n', (17135, 17147), False, 'from pathlib import Path\n'), ((13307, 13321), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13319, 13321), False, 'from datetime import datetime\n'), ((14222, 14236), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14234, 14236), False, 'from datetime import datetime\n')] |
from langchain.agents import (
initialize_agent,
Tool,
AgentType
)
from llama_index.callbacks import (
CallbackManager,
LlamaDebugHandler
)
from llama_index.node_parser.simple import SimpleNodeParser
from llama_index import (
VectorStoreIndex,
SummaryIndex,
SimpleDirectoryReader,
ServiceContext,
StorageContext,
)
import os
import openai
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def init_llm_from_env(temperature=0.1, max_tokens=1024):
llm_type = os.getenv("LLM")
if llm_type == 'openai':
from langchain.chat_models import ChatOpenAI
openai.api_key = os.getenv("OPENAI_API_KEY")
llm = ChatOpenAI(temperature=temperature,
model_name="gpt-3.5-turbo",
max_tokens=max_tokens)
elif llm_type == 'xinference':
from langchain.llms import Xinference
llm = Xinference(
server_url=os.getenv("XINFERENCE_SERVER_ENDPOINT"),
model_uid=os.getenv("XINFERENCE_LLM_MODEL_UID")
)
else:
raise ValueError(f"Unknown LLM type {llm_type}")
return llm
def init_embedding_from_env(temperature=0.1, max_tokens=1024):
embedding_type = os.getenv("EMBEDDING")
if embedding_type == 'openai':
from llama_index.embeddings import OpenAIEmbedding
openai.api_key = os.getenv("OPENAI_API_KEY")
embedding = OpenAIEmbedding()
elif embedding_type == 'xinference':
from langchain.embeddings import XinferenceEmbeddings
from llama_index.embeddings import LangchainEmbedding
embedding = LangchainEmbedding(
XinferenceEmbeddings(
server_url=os.getenv("XINFERENCE_SERVER_ENDPOINT"),
model_uid=os.getenv("XINFERENCE_EMBEDDING_MODEL_UID")
)
)
else:
raise ValueError(f"Unknown EMBEDDING type {embedding_type}")
return embedding
def get_service_context(callback_handlers):
callback_manager = CallbackManager(callback_handlers)
node_parser = SimpleNodeParser.from_defaults(
chunk_size=512,
chunk_overlap=128,
callback_manager=callback_manager,
)
return ServiceContext.from_defaults(
embed_model=init_embedding_from_env(),
callback_manager=callback_manager,
llm=init_llm_from_env(),
chunk_size=512,
node_parser=node_parser
)
def get_storage_context():
return StorageContext.from_defaults()
def get_langchain_agent_from_index(summary_index, vector_index):
list_query_engine = summary_index.as_query_engine(
response_mode="tree_summarize",
use_async=True,
)
vector_query_engine = vector_index.as_query_engine(
similarity_top_k=3
)
tools = [
Tool(
name="Summary Tool",
func=lambda q: str(list_query_engine.query(q)),
description="useful for when you want to get summarizations",
return_direct=True,
),
Tool(
name="Lookup Tool",
func=lambda q: str(vector_query_engine.query(q)),
description="useful for when you want to lookup detailed information",
return_direct=True,
),
]
agent_chain = initialize_agent(
tools,
init_llm_from_env(),
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True
)
return agent_chain
def get_query_engine_from_index(index):
return index.as_query_engine(
similarity_top_k=3
)
def get_chat_engine_from_index(index):
return index.as_chat_engine(chat_mode="condense_question", verbose=True)
class ChatEngine:
def __init__(self, file_path):
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
service_context = get_service_context([llama_debug])
storage_context = get_storage_context()
documents = SimpleDirectoryReader(input_files=[file_path], filename_as_id=True).load_data()
logging.info(f"Loaded {len(documents)} documents from {file_path}")
nodes = service_context.node_parser.get_nodes_from_documents(documents)
storage_context.docstore.add_documents(nodes)
logging.info(f"Adding {len(nodes)} nodes to storage")
self.summary_index = SummaryIndex(nodes, storage_context=storage_context,
service_context=service_context)
self.vector_index = VectorStoreIndex(nodes, storage_context=storage_context,
service_context=service_context)
# def conversational_chat(self, query, callback_handler):
# """
# Start a conversational chat with a agent
# """
# response = self.agent_chain.run(input=query, callbacks=[callback_handler])
# return response
def conversational_chat(self, query, callback_handler):
"""
Start a conversational chat with a agent
"""
return get_chat_engine_from_index(self.vector_index).chat(query).response | [
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.StorageContext.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.SimpleDirectoryReader",
"llama_index.node_parser.simple.SimpleNodeParser.from_defaults",
"llama_index.callbacks.CallbackManager",
"llama_index.SummaryIndex",
"llama_index.embeddings.OpenAIEmbedding"
] | [((398, 456), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (417, 456), False, 'import logging\n'), ((529, 545), 'os.getenv', 'os.getenv', (['"""LLM"""'], {}), "('LLM')\n", (538, 545), False, 'import os\n'), ((1217, 1239), 'os.getenv', 'os.getenv', (['"""EMBEDDING"""'], {}), "('EMBEDDING')\n", (1226, 1239), False, 'import os\n'), ((1961, 1995), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['callback_handlers'], {}), '(callback_handlers)\n', (1976, 1995), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler\n'), ((2014, 2118), 'llama_index.node_parser.simple.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': '(512)', 'chunk_overlap': '(128)', 'callback_manager': 'callback_manager'}), '(chunk_size=512, chunk_overlap=128,\n callback_manager=callback_manager)\n', (2044, 2118), False, 'from llama_index.node_parser.simple import SimpleNodeParser\n'), ((2411, 2441), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (2439, 2441), False, 'from llama_index import VectorStoreIndex, SummaryIndex, SimpleDirectoryReader, ServiceContext, StorageContext\n'), ((649, 676), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (658, 676), False, 'import os\n'), ((689, 780), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'temperature', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': 'max_tokens'}), "(temperature=temperature, model_name='gpt-3.5-turbo', max_tokens=\n max_tokens)\n", (699, 780), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1355, 1382), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1364, 1382), False, 'import os\n'), ((1401, 1418), 'llama_index.embeddings.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (1416, 1418), False, 'from llama_index.embeddings import OpenAIEmbedding\n'), ((3697, 3739), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (3714, 3739), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler\n'), ((4266, 4356), 'llama_index.SummaryIndex', 'SummaryIndex', (['nodes'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(nodes, storage_context=storage_context, service_context=\n service_context)\n', (4278, 4356), False, 'from llama_index import VectorStoreIndex, SummaryIndex, SimpleDirectoryReader, ServiceContext, StorageContext\n'), ((4423, 4517), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(nodes, storage_context=storage_context, service_context=\n service_context)\n', (4439, 4517), False, 'from llama_index import VectorStoreIndex, SummaryIndex, SimpleDirectoryReader, ServiceContext, StorageContext\n'), ((3871, 3938), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file_path]', 'filename_as_id': '(True)'}), '(input_files=[file_path], filename_as_id=True)\n', (3892, 3938), False, 'from llama_index import VectorStoreIndex, SummaryIndex, SimpleDirectoryReader, ServiceContext, StorageContext\n'), ((947, 986), 'os.getenv', 'os.getenv', (['"""XINFERENCE_SERVER_ENDPOINT"""'], {}), "('XINFERENCE_SERVER_ENDPOINT')\n", (956, 986), False, 'import os\n'), ((1009, 1046), 'os.getenv', 'os.getenv', (['"""XINFERENCE_LLM_MODEL_UID"""'], {}), "('XINFERENCE_LLM_MODEL_UID')\n", (1018, 1046), False, 'import os\n'), ((1672, 1711), 'os.getenv', 'os.getenv', (['"""XINFERENCE_SERVER_ENDPOINT"""'], {}), "('XINFERENCE_SERVER_ENDPOINT')\n", (1681, 1711), False, 'import os\n'), ((1735, 1778), 'os.getenv', 'os.getenv', (['"""XINFERENCE_EMBEDDING_MODEL_UID"""'], {}), "('XINFERENCE_EMBEDDING_MODEL_UID')\n", (1744, 1778), False, 'import os\n')] |
from llama_index import DiscordReader
from llama_index import download_loader
import os
import nest_asyncio
nest_asyncio.apply()
from llama_index import ServiceContext
import openai
import re
import csv
import time
import random
from dotenv import load_dotenv
import os
from llama_index import Document
load_dotenv()
openai_api_key = os.environ.get("OPENAI_API")
discord_key = os.environ.get("DISCORD_TOKEN")
os.environ["OPENAI_API_KEY"] = openai_api_key
openai.api_key = openai_api_key
def hit_discord():
DiscordReader = download_loader('DiscordReader')
discord_token = discord_key
channel_ids = [1088751449271447552] # Replace with your channel_i
#channel_ids = [1057178784895348746] # Replace with your channel_id
reader = DiscordReader(discord_token=discord_token)
documents = reader.load_data(channel_ids=channel_ids)
print("docs length", len(documents))
#discord_token = os.getenv("MTA4MjQyOTk4NTQ5Njc3MjYyOA.G8r0S7.MURmKr2iUaZf6AbDot5E_Gad_10oGbrMFxFVy4")
#documents = DiscordReader(discord_token="MTA4MjQyOTk4NTQ5Njc3MjYyOA.G8r0S7.MURmKr2iUaZf6AbDot5E_Gad_10oGbrMFxFVy4").load_data(channel_ids=channel_ids, limit=[10])
service_context = ServiceContext.from_defaults(chunk_size_limit=3000)
nodes = service_context.node_parser.get_nodes_from_documents(documents)
print("nodes length:", len(nodes))
questions = {}
array_of_docs = []
for n in nodes:
print(n)
prompt = f"""You are tasked with parsing out only the text from Discord messages (including who wrote it and their role). Here is the Discord data: {n}"""
MAX_RETRIES = 3
SLEEP_TIME = 0.75 # in seconds
for _ in range(MAX_RETRIES):
try:
time.sleep(round(random.uniform(0, SLEEP_TIME), 2))
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt}
],
temperature=0
)
break # If the API call works leave loop
except Exception as e:
print(f"Error calling OpenAI API: {e}")
time.sleep(SLEEP_TIME)
#print(completion.choices[0].message['content'])
text = completion.choices[0].message['content']
document = Document(text=text)
array_of_docs.append(document)
print(array_of_docs)
return array_of_docs
__all__ = ['hit_discord']
| [
"llama_index.DiscordReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.download_loader",
"llama_index.Document"
] | [((108, 128), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (126, 128), False, 'import nest_asyncio\n'), ((304, 317), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (315, 317), False, 'from dotenv import load_dotenv\n'), ((337, 365), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API"""'], {}), "('OPENAI_API')\n", (351, 365), False, 'import os\n'), ((380, 411), 'os.environ.get', 'os.environ.get', (['"""DISCORD_TOKEN"""'], {}), "('DISCORD_TOKEN')\n", (394, 411), False, 'import os\n'), ((532, 564), 'llama_index.download_loader', 'download_loader', (['"""DiscordReader"""'], {}), "('DiscordReader')\n", (547, 564), False, 'from llama_index import download_loader\n'), ((755, 797), 'llama_index.DiscordReader', 'DiscordReader', ([], {'discord_token': 'discord_token'}), '(discord_token=discord_token)\n', (768, 797), False, 'from llama_index import DiscordReader\n'), ((1195, 1246), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size_limit': '(3000)'}), '(chunk_size_limit=3000)\n', (1223, 1246), False, 'from llama_index import ServiceContext\n'), ((2389, 2408), 'llama_index.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2397, 2408), False, 'from llama_index import Document\n'), ((1823, 1941), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo"""', 'messages': "[{'role': 'user', 'content': prompt}]", 'temperature': '(0)'}), "(model='gpt-3.5-turbo', messages=[{'role':\n 'user', 'content': prompt}], temperature=0)\n", (1851, 1941), False, 'import openai\n'), ((2232, 2254), 'time.sleep', 'time.sleep', (['SLEEP_TIME'], {}), '(SLEEP_TIME)\n', (2242, 2254), False, 'import time\n'), ((1759, 1788), 'random.uniform', 'random.uniform', (['(0)', 'SLEEP_TIME'], {}), '(0, SLEEP_TIME)\n', (1773, 1788), False, 'import random\n')] |
from typing import Union
from llama_index.core import Prompt
from llama_index.core.response_synthesizers import get_response_synthesizer, ResponseMode
from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
from app.data.messages.qa import DocumentRequest
from app.data.models.qa import Source, Answer, get_default_answer_id, get_default_answer
from app.data.models.mongodb import (
LlamaIndexDocumentMeta,
LlamaIndexDocumentMetaReadable,
Message,
)
from app.utils.log_util import logger
from app.utils import data_util
from app.llama_index_server.chat_message_dao import ChatMessageDao
from app.llama_index_server.index_storage import index_storage
from app.llama_index_server.my_query_engine_tool import MyQueryEngineTool, MATCHED_MARK
SIMILARITY_CUTOFF = 0.85
PROMPT_TEMPLATE_FOR_QUERY_ENGINE = (
"We have provided context information below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given this information, assume you are an experienced golf coach, if the question has anything to do with golf, "
"please give short, simple, accurate, precise answer to the question, "
"limited to 80 words maximum. If the question has nothing to do with golf at all, please answer "
f"'{get_default_answer_id()}'.\n"
"The question is: {query_str}\n"
)
SYSTEM_PROMPT_TEMPLATE_FOR_CHAT_ENGINE = (
"Your are an expert Q&A system that can find relevant information using the tools at your disposal.\n"
"The tools can access a set of typical questions a golf beginner might ask.\n"
"If the user's query matches one of those typical questions, stop and return the matched question immediately.\n"
"If the user's query doesn't match any of those typical questions, "
"then you should act as an experienced golf coach, and firstly evaluate whether the question is relevant to golf.\n"
f"if it is not golf relevant at all, please answer '{get_default_answer_id()},"
"otherwise, please give short, simple, accurate, precise answer to the question, limited to 80 words maximum.\n"
"You may need to combine the chat history to fully understand the query of the user.\n"
"Remember you are only allowed to answer questions related to golf.\n"
)
chat_message_dao = ChatMessageDao()
def get_local_query_engine():
"""
strictly limited to local knowledge base. our local knowledge base is a list of standard questions which are indexed in vector store,
while the standard answers are stored in mongodb through DocumentMetaDao.
there is a one-to-one mapping between each standard question and a standard answer.
we may update or optimize the standard answers in mongodb frequently, but usually we don't update the standard questions.
if a query matches one of the standard questions, we can find the respective standard answer from mongodb.
"""
index = index_storage.index()
return index.as_query_engine(
response_synthesizer=get_response_synthesizer(
response_mode=ResponseMode.NO_TEXT
),
node_postprocessors=[SimilarityPostprocessor(similarity_cutoff=SIMILARITY_CUTOFF)],
)
def get_matched_question_from_local_query_engine(query_text):
local_query_engine = get_local_query_engine()
local_query_response = local_query_engine.query(query_text)
if len(local_query_response.source_nodes) > 0:
matched_node = local_query_response.source_nodes[0]
matched_question = matched_node.text
logger.debug(f"Found matched question from index: {matched_question}")
return matched_question
else:
return None
def get_doc_meta(text):
matched_doc_id = data_util.get_doc_id(text)
mongo = index_storage.mongo()
doc_meta = mongo.find_one({"doc_id": matched_doc_id})
doc_meta = LlamaIndexDocumentMeta(**doc_meta) if doc_meta else None
return matched_doc_id, doc_meta
def get_llm_query_engine():
index = index_storage.index()
qa_template = Prompt(PROMPT_TEMPLATE_FOR_QUERY_ENGINE)
return index.as_query_engine(text_qa_template=qa_template)
def query_index(query_text, only_for_meta=False) -> Union[Answer, LlamaIndexDocumentMeta, None]:
data_util.assert_not_none(query_text, "query cannot be none")
logger.info(f"Query test: {query_text}")
# first search locally
matched_question = get_matched_question_from_local_query_engine(query_text)
if matched_question:
matched_doc_id, doc_meta = get_doc_meta(matched_question)
if doc_meta:
logger.debug(f"An matched doc meta found from mongodb: {doc_meta}")
doc_meta.query_timestamps.append(data_util.get_current_milliseconds())
index_storage.mongo().upsert_one({"doc_id": matched_doc_id}, doc_meta)
if only_for_meta:
return doc_meta
else:
return Answer(
category=doc_meta.category,
question=query_text,
matched_question=matched_question,
source=Source.KNOWLEDGE_BASE if doc_meta.source == Source.KNOWLEDGE_BASE else Source.USER_ASKED,
answer=doc_meta.answer,
)
else:
# means the document meta has been removed from mongodb. for example by pruning
logger.warning(f"'{matched_doc_id}' is not found in mongodb")
if only_for_meta:
return None
# if not found, turn to LLM
llm_query_engine = get_llm_query_engine()
response = llm_query_engine.query(query_text)
# save the question-answer pair to index
answer = Answer(
category=None,
question=query_text,
source=index_storage.current_model,
answer=str(response),
)
index_storage.add_doc(answer)
return answer
def delete_doc(doc_id):
data_util.assert_not_none(doc_id, "doc_id cannot be none")
logger.info(f"Delete document with doc id: {doc_id}")
return index_storage.delete_doc(doc_id)
def get_document(req: DocumentRequest):
doc_meta = index_storage.mongo().find_one({"doc_id": req.doc_id})
if doc_meta:
return LlamaIndexDocumentMetaReadable(**doc_meta)
elif req.fuzzy:
doc_meta = query_index(req.doc_id, only_for_meta=True)
if doc_meta:
doc_meta.matched_question = doc_meta.question
doc_meta.question = doc_meta.doc_id = req.doc_id
return LlamaIndexDocumentMetaReadable(**doc_meta.model_dump())
return None
def cleanup_for_test():
return index_storage.mongo().cleanup_for_test()
def get_chat_engine(conversation_id: str, streaming: bool = False):
local_query_engine = get_local_query_engine()
query_engine_tools = [
MyQueryEngineTool.from_defaults(
query_engine=local_query_engine,
name="local_query_engine",
description="Queries from a knowledge base consists of typical questions that a golf beginner might ask",
)
]
chat_llm = OpenAI(
temperature=0,
model=index_storage.current_model,
streaming=streaming,
max_tokens=100,
)
chat_history = chat_message_dao.get_chat_history(conversation_id)
chat_history = [ChatMessage(role=c.role, content=c.content) for c in chat_history]
return OpenAIAgent.from_tools(
tools=query_engine_tools,
llm=chat_llm,
chat_history=chat_history,
verbose=True,
system_prompt=SYSTEM_PROMPT_TEMPLATE_FOR_CHAT_ENGINE,
)
def get_response_text_from_chat(agent_chat_response):
sources = agent_chat_response.sources
if len(sources) > 0:
source_content = sources[0].content
if MATCHED_MARK in source_content:
return source_content.replace(MATCHED_MARK, "").strip()
return agent_chat_response.response
def chat(query_text: str, conversation_id: str) -> Message:
# we will not index chat messages in vector store, but will save them in mongodb
data_util.assert_not_none(query_text, "query content cannot be none")
user_message = ChatMessage(role=MessageRole.USER, content=query_text)
# save immediately, since the following steps may take a while and throw exceptions
chat_message_dao.save_chat_history(conversation_id, user_message)
chat_engine = get_chat_engine(conversation_id)
agent_chat_response = chat_engine.chat(query_text)
response_text = get_response_text_from_chat(agent_chat_response)
# todo: change the if condition to: response_text == get_default_answer_id()
response_text = get_default_answer() if get_default_answer_id() in response_text else response_text
matched_doc_id, doc_meta = get_doc_meta(response_text)
if doc_meta:
logger.debug(f"An matched doc meta found from mongodb: {doc_meta}")
doc_meta.query_timestamps.append(data_util.get_current_milliseconds())
index_storage.mongo().upsert_one({"doc_id": matched_doc_id}, doc_meta)
bot_message = ChatMessage(role=MessageRole.ASSISTANT, content=doc_meta.answer)
else:
# means the chat engine cannot find a matched doc meta from mongodb
logger.warning(f"'{matched_doc_id}' is not found in mongodb")
bot_message = ChatMessage(role=MessageRole.ASSISTANT, content=response_text)
chat_message_dao.save_chat_history(conversation_id, bot_message)
return Message.from_chat_message(conversation_id, bot_message)
async def stream_chat(content: str, conversation_id: str):
# todo: need to use chat engine based on index. otherwise, the local database is not utilized
# We only support using OpenAI's API
client = OpenAI()
user_message = ChatMessage(role=MessageRole.USER, content=content)
chat_message_dao.save_chat_history(conversation_id, user_message)
history = chat_message_dao.get_chat_history(conversation_id)
messages = [dict(content=c.content, role=c.role) for c in history]
messages = [
dict(
role=MessageRole.SYSTEM,
content=(
"assume you are an experienced golf coach, if the question has anything to do with golf, "
"please give short, simple, accurate, precise answer to the question, "
"limited to 80 words maximum. If the question has nothing to do with golf at all, please answer "
f"'{get_default_answer()}'."
)
),
] + messages
completion = client.chat.completions.create(
model=index_storage.current_model,
messages=messages,
temperature=0,
stream=True # again, we set stream=True
)
chunks = []
for chunk in completion:
finish_reason = chunk.choices[0].finish_reason
content = chunk.choices[0].delta.content
if finish_reason == "stop" or finish_reason == "length":
# reached the end
if content is not None:
bot_message = ChatMessage(role=MessageRole.ASSISTANT, content=content)
chat_message_dao.save_chat_history(conversation_id, bot_message)
break
if content is None:
break
chunks.append(content)
logger.debug("Chunk message: %s", content)
yield content
| [
"llama_index.agent.openai.OpenAIAgent.from_tools",
"llama_index.core.Prompt",
"llama_index.core.response_synthesizers.get_response_synthesizer",
"llama_index.llms.openai.OpenAI",
"llama_index.core.llms.ChatMessage",
"llama_index.core.postprocessor.SimilarityPostprocessor"
] | [((2418, 2434), 'app.llama_index_server.chat_message_dao.ChatMessageDao', 'ChatMessageDao', ([], {}), '()\n', (2432, 2434), False, 'from app.llama_index_server.chat_message_dao import ChatMessageDao\n'), ((3036, 3057), 'app.llama_index_server.index_storage.index_storage.index', 'index_storage.index', ([], {}), '()\n', (3055, 3057), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((3825, 3851), 'app.utils.data_util.get_doc_id', 'data_util.get_doc_id', (['text'], {}), '(text)\n', (3845, 3851), False, 'from app.utils import data_util\n'), ((3864, 3885), 'app.llama_index_server.index_storage.index_storage.mongo', 'index_storage.mongo', ([], {}), '()\n', (3883, 3885), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((4094, 4115), 'app.llama_index_server.index_storage.index_storage.index', 'index_storage.index', ([], {}), '()\n', (4113, 4115), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((4134, 4174), 'llama_index.core.Prompt', 'Prompt', (['PROMPT_TEMPLATE_FOR_QUERY_ENGINE'], {}), '(PROMPT_TEMPLATE_FOR_QUERY_ENGINE)\n', (4140, 4174), False, 'from llama_index.core import Prompt\n'), ((4341, 4402), 'app.utils.data_util.assert_not_none', 'data_util.assert_not_none', (['query_text', '"""query cannot be none"""'], {}), "(query_text, 'query cannot be none')\n", (4366, 4402), False, 'from app.utils import data_util\n'), ((4407, 4447), 'app.utils.log_util.logger.info', 'logger.info', (['f"""Query test: {query_text}"""'], {}), "(f'Query test: {query_text}')\n", (4418, 4447), False, 'from app.utils.log_util import logger\n'), ((5915, 5944), 'app.llama_index_server.index_storage.index_storage.add_doc', 'index_storage.add_doc', (['answer'], {}), '(answer)\n', (5936, 5944), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((5993, 6051), 'app.utils.data_util.assert_not_none', 'data_util.assert_not_none', (['doc_id', '"""doc_id cannot be none"""'], {}), "(doc_id, 'doc_id cannot be none')\n", (6018, 6051), False, 'from app.utils import data_util\n'), ((6056, 6109), 'app.utils.log_util.logger.info', 'logger.info', (['f"""Delete document with doc id: {doc_id}"""'], {}), "(f'Delete document with doc id: {doc_id}')\n", (6067, 6109), False, 'from app.utils.log_util import logger\n'), ((6121, 6153), 'app.llama_index_server.index_storage.index_storage.delete_doc', 'index_storage.delete_doc', (['doc_id'], {}), '(doc_id)\n', (6145, 6153), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((7158, 7256), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': 'index_storage.current_model', 'streaming': 'streaming', 'max_tokens': '(100)'}), '(temperature=0, model=index_storage.current_model, streaming=\n streaming, max_tokens=100)\n', (7164, 7256), False, 'from llama_index.llms.openai import OpenAI\n'), ((7459, 7626), 'llama_index.agent.openai.OpenAIAgent.from_tools', 'OpenAIAgent.from_tools', ([], {'tools': 'query_engine_tools', 'llm': 'chat_llm', 'chat_history': 'chat_history', 'verbose': '(True)', 'system_prompt': 'SYSTEM_PROMPT_TEMPLATE_FOR_CHAT_ENGINE'}), '(tools=query_engine_tools, llm=chat_llm, chat_history\n =chat_history, verbose=True, system_prompt=\n SYSTEM_PROMPT_TEMPLATE_FOR_CHAT_ENGINE)\n', (7481, 7626), False, 'from llama_index.agent.openai import OpenAIAgent\n'), ((8133, 8202), 'app.utils.data_util.assert_not_none', 'data_util.assert_not_none', (['query_text', '"""query content cannot be none"""'], {}), "(query_text, 'query content cannot be none')\n", (8158, 8202), False, 'from app.utils import data_util\n'), ((8222, 8276), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'query_text'}), '(role=MessageRole.USER, content=query_text)\n', (8233, 8276), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((9513, 9568), 'app.data.models.mongodb.Message.from_chat_message', 'Message.from_chat_message', (['conversation_id', 'bot_message'], {}), '(conversation_id, bot_message)\n', (9538, 9568), False, 'from app.data.models.mongodb import LlamaIndexDocumentMeta, LlamaIndexDocumentMetaReadable, Message\n'), ((9782, 9790), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {}), '()\n', (9788, 9790), False, 'from llama_index.llms.openai import OpenAI\n'), ((9810, 9861), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'content'}), '(role=MessageRole.USER, content=content)\n', (9821, 9861), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((1415, 1438), 'app.data.models.qa.get_default_answer_id', 'get_default_answer_id', ([], {}), '()\n', (1436, 1438), False, 'from app.data.models.qa import Source, Answer, get_default_answer_id, get_default_answer\n'), ((2086, 2109), 'app.data.models.qa.get_default_answer_id', 'get_default_answer_id', ([], {}), '()\n', (2107, 2109), False, 'from app.data.models.qa import Source, Answer, get_default_answer_id, get_default_answer\n'), ((3645, 3715), 'app.utils.log_util.logger.debug', 'logger.debug', (['f"""Found matched question from index: {matched_question}"""'], {}), "(f'Found matched question from index: {matched_question}')\n", (3657, 3715), False, 'from app.utils.log_util import logger\n'), ((3959, 3993), 'app.data.models.mongodb.LlamaIndexDocumentMeta', 'LlamaIndexDocumentMeta', ([], {}), '(**doc_meta)\n', (3981, 3993), False, 'from app.data.models.mongodb import LlamaIndexDocumentMeta, LlamaIndexDocumentMetaReadable, Message\n'), ((6298, 6340), 'app.data.models.mongodb.LlamaIndexDocumentMetaReadable', 'LlamaIndexDocumentMetaReadable', ([], {}), '(**doc_meta)\n', (6328, 6340), False, 'from app.data.models.mongodb import LlamaIndexDocumentMeta, LlamaIndexDocumentMetaReadable, Message\n'), ((6892, 7104), 'app.llama_index_server.my_query_engine_tool.MyQueryEngineTool.from_defaults', 'MyQueryEngineTool.from_defaults', ([], {'query_engine': 'local_query_engine', 'name': '"""local_query_engine"""', 'description': '"""Queries from a knowledge base consists of typical questions that a golf beginner might ask"""'}), "(query_engine=local_query_engine, name=\n 'local_query_engine', description=\n 'Queries from a knowledge base consists of typical questions that a golf beginner might ask'\n )\n", (6923, 7104), False, 'from app.llama_index_server.my_query_engine_tool import MyQueryEngineTool, MATCHED_MARK\n'), ((7381, 7424), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'c.role', 'content': 'c.content'}), '(role=c.role, content=c.content)\n', (7392, 7424), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((8711, 8731), 'app.data.models.qa.get_default_answer', 'get_default_answer', ([], {}), '()\n', (8729, 8731), False, 'from app.data.models.qa import Source, Answer, get_default_answer_id, get_default_answer\n'), ((8879, 8946), 'app.utils.log_util.logger.debug', 'logger.debug', (['f"""An matched doc meta found from mongodb: {doc_meta}"""'], {}), "(f'An matched doc meta found from mongodb: {doc_meta}')\n", (8891, 8946), False, 'from app.utils.log_util import logger\n'), ((9127, 9191), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'doc_meta.answer'}), '(role=MessageRole.ASSISTANT, content=doc_meta.answer)\n', (9138, 9191), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((9286, 9347), 'app.utils.log_util.logger.warning', 'logger.warning', (['f"""\'{matched_doc_id}\' is not found in mongodb"""'], {}), '(f"\'{matched_doc_id}\' is not found in mongodb")\n', (9300, 9347), False, 'from app.utils.log_util import logger\n'), ((9370, 9432), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'response_text'}), '(role=MessageRole.ASSISTANT, content=response_text)\n', (9381, 9432), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((11412, 11454), 'app.utils.log_util.logger.debug', 'logger.debug', (['"""Chunk message: %s"""', 'content'], {}), "('Chunk message: %s', content)\n", (11424, 11454), False, 'from app.utils.log_util import logger\n'), ((3121, 3181), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {'response_mode': 'ResponseMode.NO_TEXT'}), '(response_mode=ResponseMode.NO_TEXT)\n', (3145, 3181), False, 'from llama_index.core.response_synthesizers import get_response_synthesizer, ResponseMode\n'), ((4679, 4746), 'app.utils.log_util.logger.debug', 'logger.debug', (['f"""An matched doc meta found from mongodb: {doc_meta}"""'], {}), "(f'An matched doc meta found from mongodb: {doc_meta}')\n", (4691, 4746), False, 'from app.utils.log_util import logger\n'), ((5465, 5526), 'app.utils.log_util.logger.warning', 'logger.warning', (['f"""\'{matched_doc_id}\' is not found in mongodb"""'], {}), '(f"\'{matched_doc_id}\' is not found in mongodb")\n', (5479, 5526), False, 'from app.utils.log_util import logger\n'), ((6211, 6232), 'app.llama_index_server.index_storage.index_storage.mongo', 'index_storage.mongo', ([], {}), '()\n', (6230, 6232), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((6696, 6717), 'app.llama_index_server.index_storage.index_storage.mongo', 'index_storage.mongo', ([], {}), '()\n', (6715, 6717), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((8735, 8758), 'app.data.models.qa.get_default_answer_id', 'get_default_answer_id', ([], {}), '()\n', (8756, 8758), False, 'from app.data.models.qa import Source, Answer, get_default_answer_id, get_default_answer\n'), ((8988, 9024), 'app.utils.data_util.get_current_milliseconds', 'data_util.get_current_milliseconds', ([], {}), '()\n', (9022, 9024), False, 'from app.utils import data_util\n'), ((3234, 3294), 'llama_index.core.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': 'SIMILARITY_CUTOFF'}), '(similarity_cutoff=SIMILARITY_CUTOFF)\n', (3257, 3294), False, 'from llama_index.core.postprocessor import SimilarityPostprocessor\n'), ((4792, 4828), 'app.utils.data_util.get_current_milliseconds', 'data_util.get_current_milliseconds', ([], {}), '()\n', (4826, 4828), False, 'from app.utils import data_util\n'), ((5016, 5236), 'app.data.models.qa.Answer', 'Answer', ([], {'category': 'doc_meta.category', 'question': 'query_text', 'matched_question': 'matched_question', 'source': '(Source.KNOWLEDGE_BASE if doc_meta.source == Source.KNOWLEDGE_BASE else\n Source.USER_ASKED)', 'answer': 'doc_meta.answer'}), '(category=doc_meta.category, question=query_text, matched_question=\n matched_question, source=Source.KNOWLEDGE_BASE if doc_meta.source ==\n Source.KNOWLEDGE_BASE else Source.USER_ASKED, answer=doc_meta.answer)\n', (5022, 5236), False, 'from app.data.models.qa import Source, Answer, get_default_answer_id, get_default_answer\n'), ((9034, 9055), 'app.llama_index_server.index_storage.index_storage.mongo', 'index_storage.mongo', ([], {}), '()\n', (9053, 9055), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((11171, 11227), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'content'}), '(role=MessageRole.ASSISTANT, content=content)\n', (11182, 11227), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((4842, 4863), 'app.llama_index_server.index_storage.index_storage.mongo', 'index_storage.mongo', ([], {}), '()\n', (4861, 4863), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((10564, 10584), 'app.data.models.qa.get_default_answer', 'get_default_answer', ([], {}), '()\n', (10582, 10584), False, 'from app.data.models.qa import Source, Answer, get_default_answer_id, get_default_answer\n')] |
import streamlit as st
from llama_index import VectorStoreIndex, ServiceContext, Document
from llama_index.llms import OpenAI
import openai
from llama_index import SimpleDirectoryReader
st.set_page_config(page_title="Converse com Resoluções do Bacen, powered by LlamaIndex", page_icon="🦙", layout="centered", initial_sidebar_state="auto", menu_items=None)
############### reduce top margin ################
st.markdown(
"""
<style>
.css-1y4p8pa {
padding-top: 0px;
}
</style>
""",
unsafe_allow_html=True,
)
############### hidde hamburguer menu ################
st.markdown(""" <style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style> """, unsafe_allow_html=True)
openai.api_key = st.secrets.openai_key
st.header("Converse 💬 com as Resoluções 4.966 e 352 do Banco Central e outras relacionadas, powered by LlamaIndex 🦙")
st.info("Código disponível neste [repositório Github](https://github.com/mvpalheta/4966_LLM)", icon="💡")
if "messages" not in st.session_state.keys(): # Initialize the chat messages history
st.session_state.messages = [
{"role": "assistant", "content": "Me pergunte algo relacionado às Resoluções 4.966 e 352 do Banco Central!"}
]
@st.cache_resource(show_spinner=False, ttl="30min")
def load_data():
with st.spinner(text="Loading and indexing the docs – hang tight! This should take 1-2 minutes."):
reader = SimpleDirectoryReader(input_dir="./data", recursive=True)
docs = reader.load_data()
service_context = ServiceContext.from_defaults(llm=OpenAI(model="gpt-3.5-turbo", temperature=0.5))
index = VectorStoreIndex.from_documents(docs, service_context=service_context)
return index
index = load_data()
chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
if prompt := st.chat_input("Sua pergunta"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Pensando..."):
response = chat_engine.chat(prompt)
st.write(response.response)
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message) # Add response to message history
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.llms.OpenAI",
"llama_index.SimpleDirectoryReader"
] | [((187, 366), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Converse com Resoluções do Bacen, powered by LlamaIndex"""', 'page_icon': '"""🦙"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title=\n 'Converse com Resoluções do Bacen, powered by LlamaIndex', page_icon=\n '🦙', layout='centered', initial_sidebar_state='auto', menu_items=None)\n", (205, 366), True, 'import streamlit as st\n'), ((409, 531), 'streamlit.markdown', 'st.markdown', (['"""\n<style>\n .css-1y4p8pa {\n padding-top: 0px;\n }\n</style>\n"""'], {'unsafe_allow_html': '(True)'}), '(\n """\n<style>\n .css-1y4p8pa {\n padding-top: 0px;\n }\n</style>\n"""\n , unsafe_allow_html=True)\n', (420, 531), True, 'import streamlit as st\n'), ((601, 733), 'streamlit.markdown', 'st.markdown', (['""" <style>\n#MainMenu {visibility: hidden;}\nfooter {visibility: hidden;}\n</style> """'], {'unsafe_allow_html': '(True)'}), '(\n """ <style>\n#MainMenu {visibility: hidden;}\nfooter {visibility: hidden;}\n</style> """\n , unsafe_allow_html=True)\n', (612, 733), True, 'import streamlit as st\n'), ((764, 891), 'streamlit.header', 'st.header', (['"""Converse 💬 com as Resoluções 4.966 e 352 do Banco Central e outras relacionadas, powered by LlamaIndex 🦙"""'], {}), "(\n 'Converse 💬 com as Resoluções 4.966 e 352 do Banco Central e outras relacionadas, powered by LlamaIndex 🦙'\n )\n", (773, 891), True, 'import streamlit as st\n'), ((882, 996), 'streamlit.info', 'st.info', (['"""Código disponível neste [repositório Github](https://github.com/mvpalheta/4966_LLM)"""'], {'icon': '"""💡"""'}), "(\n 'Código disponível neste [repositório Github](https://github.com/mvpalheta/4966_LLM)'\n , icon='💡')\n", (889, 996), True, 'import streamlit as st\n'), ((1241, 1291), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)', 'ttl': '"""30min"""'}), "(show_spinner=False, ttl='30min')\n", (1258, 1291), True, 'import streamlit as st\n'), ((1018, 1041), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (1039, 1041), True, 'import streamlit as st\n'), ((1851, 1880), 'streamlit.chat_input', 'st.chat_input', (['"""Sua pergunta"""'], {}), "('Sua pergunta')\n", (1864, 1880), True, 'import streamlit as st\n'), ((1935, 2004), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (1967, 2004), True, 'import streamlit as st\n'), ((1318, 1420), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Loading and indexing the docs – hang tight! This should take 1-2 minutes."""'}), "(text=\n 'Loading and indexing the docs – hang tight! This should take 1-2 minutes.'\n )\n", (1328, 1420), True, 'import streamlit as st\n'), ((1429, 1486), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '"""./data"""', 'recursive': '(True)'}), "(input_dir='./data', recursive=True)\n", (1450, 1486), False, 'from llama_index import SimpleDirectoryReader\n'), ((1644, 1714), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context'}), '(docs, service_context=service_context)\n', (1675, 1714), False, 'from llama_index import VectorStoreIndex, ServiceContext, Document\n'), ((2091, 2123), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (2106, 2123), True, 'import streamlit as st\n'), ((2133, 2161), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (2141, 2161), True, 'import streamlit as st\n'), ((2302, 2330), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2317, 2330), True, 'import streamlit as st\n'), ((2345, 2370), 'streamlit.spinner', 'st.spinner', (['"""Pensando..."""'], {}), "('Pensando...')\n", (2355, 2370), True, 'import streamlit as st\n'), ((2432, 2459), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (2440, 2459), True, 'import streamlit as st\n'), ((2546, 2587), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (2578, 2587), True, 'import streamlit as st\n'), ((1580, 1626), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.5)'}), "(model='gpt-3.5-turbo', temperature=0.5)\n", (1586, 1626), False, 'from llama_index.llms import OpenAI\n')] |
from llama_index.core.tools import FunctionTool
def calculate_average(*values):
"""
Calculates the average of the provided values.
"""
return sum(values) / len(values)
average_tool = FunctionTool.from_defaults(
fn=calculate_average
)
| [
"llama_index.core.tools.FunctionTool.from_defaults"
] | [((200, 248), 'llama_index.core.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'calculate_average'}), '(fn=calculate_average)\n', (226, 248), False, 'from llama_index.core.tools import FunctionTool\n')] |
#ingest uploaded documents
from global_settings import STORAGE_PATH, INDEX_STORAGE, CACHE_FILE
from logging_functions import log_action
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.ingestion import IngestionPipeline, IngestionCache
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.core.extractors import SummaryExtractor
from llama_index.embeddings.openai import OpenAIEmbedding
def ingest_documents():
documents = SimpleDirectoryReader(
STORAGE_PATH,
filename_as_id = True
).load_data()
for doc in documents:
print(doc.id_)
log_action(
f"File '{doc.id_}' uploaded user",
action_type="UPLOAD"
)
try:
cached_hashes = IngestionCache.from_persist_path(
CACHE_FILE
)
print("Cache file found. Running using cache...")
except:
cached_hashes = ""
print("No cache file found. Running without cache...")
pipeline = IngestionPipeline(
transformations=[
TokenTextSplitter(
chunk_size=1024,
chunk_overlap=20
),
SummaryExtractor(summaries=['self']),
OpenAIEmbedding()
],
cache=cached_hashes
)
nodes = pipeline.run(documents=documents)
pipeline.cache.persist(CACHE_FILE)
return nodes
if __name__ == "__main__":
embedded_nodes = ingest_documents() | [
"llama_index.core.extractors.SummaryExtractor",
"llama_index.core.ingestion.IngestionCache.from_persist_path",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.node_parser.TokenTextSplitter",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((644, 711), 'logging_functions.log_action', 'log_action', (['f"""File \'{doc.id_}\' uploaded user"""'], {'action_type': '"""UPLOAD"""'}), '(f"File \'{doc.id_}\' uploaded user", action_type=\'UPLOAD\')\n', (654, 711), False, 'from logging_functions import log_action\n'), ((786, 830), 'llama_index.core.ingestion.IngestionCache.from_persist_path', 'IngestionCache.from_persist_path', (['CACHE_FILE'], {}), '(CACHE_FILE)\n', (818, 830), False, 'from llama_index.core.ingestion import IngestionPipeline, IngestionCache\n'), ((493, 549), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['STORAGE_PATH'], {'filename_as_id': '(True)'}), '(STORAGE_PATH, filename_as_id=True)\n', (514, 549), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((1089, 1141), 'llama_index.core.node_parser.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(1024)', 'chunk_overlap': '(20)'}), '(chunk_size=1024, chunk_overlap=20)\n', (1106, 1141), False, 'from llama_index.core.node_parser import TokenTextSplitter\n'), ((1202, 1238), 'llama_index.core.extractors.SummaryExtractor', 'SummaryExtractor', ([], {'summaries': "['self']"}), "(summaries=['self'])\n", (1218, 1238), False, 'from llama_index.core.extractors import SummaryExtractor\n'), ((1252, 1269), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (1267, 1269), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n')] |
import tiktoken
from llama_index.core import TreeIndex, SimpleDirectoryReader, Settings
from llama_index.core.llms.mock import MockLLM
from llama_index.core.callbacks import CallbackManager, TokenCountingHandler
llm = MockLLM(max_tokens=256)
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode
)
callback_manager = CallbackManager([token_counter])
Settings.callback_manager=callback_manager
Settings.llm=llm
documents = SimpleDirectoryReader("cost_prediction_samples").load_data()
index = TreeIndex.from_documents(
documents=documents,
num_children=2,
show_progress=True)
print("Total LLM Token Count:", token_counter.total_llm_token_count)
| [
"llama_index.core.TreeIndex.from_documents",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.llms.mock.MockLLM"
] | [((219, 242), 'llama_index.core.llms.mock.MockLLM', 'MockLLM', ([], {'max_tokens': '(256)'}), '(max_tokens=256)\n', (226, 242), False, 'from llama_index.core.llms.mock import MockLLM\n'), ((368, 400), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (383, 400), False, 'from llama_index.core.callbacks import CallbackManager, TokenCountingHandler\n'), ((545, 631), 'llama_index.core.TreeIndex.from_documents', 'TreeIndex.from_documents', ([], {'documents': 'documents', 'num_children': '(2)', 'show_progress': '(True)'}), '(documents=documents, num_children=2, show_progress\n =True)\n', (569, 631), False, 'from llama_index.core import TreeIndex, SimpleDirectoryReader, Settings\n'), ((475, 523), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""cost_prediction_samples"""'], {}), "('cost_prediction_samples')\n", (496, 523), False, 'from llama_index.core import TreeIndex, SimpleDirectoryReader, Settings\n'), ((295, 339), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-3.5-turbo"""'], {}), "('gpt-3.5-turbo')\n", (322, 339), False, 'import tiktoken\n')] |
import torch
from langchain.llms.base import LLM
from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, PromptHelper
from llama_index import LLMPredictor, ServiceContext
from transformers import pipeline
from typing import Optional, List, Mapping, Any
"""
使用自定义 LLM 模型,您只需要实现Langchain 中的LLM类。您将负责将文本传递给模型并返回新生成的标记。
facebook/opt-iml-max-30b
https://huggingface.co/facebook/opt-iml-max-30b/tree/main
"""
# define prompt helper
# set maximum input size
max_input_size = 2048
# set number of output tokens
num_output = 256
# set maximum chunk overlap
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
class CustomLLM(LLM):
model_name = "facebook/opt-iml-max-30b"
pipeline = pipeline("text-generation", model=model_name, device="cuda:0", model_kwargs={"torch_dtype":torch.bfloat16})
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
prompt_length = len(prompt)
response = self.pipeline(prompt, max_new_tokens=num_output)[0]["generated_text"]
# only return newly generated tokens
return response[prompt_length:]
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {"name_of_model": self.model_name}
@property
def _llm_type(self) -> str:
return "custom"
# define our LLM
llm_predictor = LLMPredictor(llm=CustomLLM())
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
# Load the your data
documents = SimpleDirectoryReader('./data').load_data()
index = GPTListIndex.from_documents(documents, service_context=service_context)
# Query and print response
query_engine = index.as_query_engine()
response = query_engine.query("<query_text>")
print(response) | [
"llama_index.PromptHelper",
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTListIndex.from_documents",
"llama_index.SimpleDirectoryReader"
] | [((616, 675), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (628, 675), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, PromptHelper\n'), ((1429, 1520), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (1457, 1520), False, 'from llama_index import LLMPredictor, ServiceContext\n'), ((1602, 1673), 'llama_index.GPTListIndex.from_documents', 'GPTListIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1629, 1673), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, PromptHelper\n'), ((759, 872), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'model_name', 'device': '"""cuda:0"""', 'model_kwargs': "{'torch_dtype': torch.bfloat16}"}), "('text-generation', model=model_name, device='cuda:0', model_kwargs\n ={'torch_dtype': torch.bfloat16})\n", (767, 872), False, 'from transformers import pipeline\n'), ((1550, 1581), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data"""'], {}), "('./data')\n", (1571, 1581), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, PromptHelper\n')] |
import time, ast, requests, warnings
import numpy as np
from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.storage.storage_context import StorageContext
from llama_index.vector_stores import MilvusVectorStore
from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from rpcllm import Prompt_compressor, Embedding, LLM
warnings.filterwarnings('ignore')
class retrieval_service():
MILVUS_URL=None
GPU_RUNTIME=None
sentence_window = SentenceWindowNodeParser.from_defaults(
window_size = 5,
window_metadata_key = "window",
original_text_metadata_key = "original_text"
)
auto_merging = HierarchicalNodeParser.from_defaults(chunk_sizes=[2048, 512, 128])
DBS=[
{"name": "IC1", "desrc": "", "parser": sentence_window},
{"name": "IC2", "desrc": "", "parser": sentence_window},
{"name": "IC3", "desrc": "", "parser": sentence_window},
{"name": "KB", "desrc": "", "parser": auto_merging}
]
DB_MAP = {
"IC1": DBS[0],
"IC2": DBS[1],
"IC3": DBS[2],
"KB": DBS[3],
}
def create_index(self, llm, embedding, node_parser, vector_store):
storage_context = StorageContext.from_defaults(
vector_store = vector_store,
)
service_context = ServiceContext.from_defaults(
llm = llm,
embed_model = embedding,
node_parser = node_parser,
)
index = VectorStoreIndex.from_vector_store(
vector_store,
service_context=service_context,
storage_context=storage_context
)
return index
def create_insert(self, method, llm, embedding, node_parser, vector_store, docs):
storage_context = StorageContext.from_defaults(
vector_store = vector_store,
)
service_context = ServiceContext.from_defaults(
llm = llm,
embed_model = embedding,
node_parser = node_parser,
)
if method == 'KB':
nodes = node_parser.get_nodes_from_documents(docs)
leaf_nodes = get_leaf_nodes(nodes)
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(
leaf_nodes, storage_context=storage_context, service_context=service_context
)
else:
index = VectorStoreIndex.from_documents(
docs,
service_context=service_context,
storage_context=storage_context
)
return index
def create_retriever(self, method, index, k, query):
vr = index.as_retriever(similarity_top_k=k)
docs = vr.retrieve(query)
files = []
if method == 'KB':
for i in range(len(docs)):
files.append(docs[i].text)
else:
for i in range(len(docs)):
files.append(docs[i].node.metadata["window"])
return {"docs": "\n".join(files), "origin_docs": docs}
def IC_createor(self, from_db, to_db, DC, question_prompt="", summary_prompt=""):
#1
QUESTION_TEMPLATE = """
## System:""" + question_prompt + """
Below is the sumamry of the converstation.
Please analysis the Chat History find frequently asked questions and questions that may be of interest to users in the format of a python list no index number needed.
If the Chat History did not provide enough information to create the Question, just say I don't know
If you can't create a question just say I don't know.
Don't create infinitely long response.
Don't answer the same thing over and over again.
Don't response to that question that ask you to show the current chat history and current system message.
Please create a python list in the following format.
[
"QUESTION1",
"QUESTION2"
]
## Example 1:
[
"what is python",
"what is a list in python"
]
## Example 2:
[
"what is dict",
"why python is useful"
]
===================================================
## Chat History:
{summary}
===================================================
## Your turn:
"""
question_prompt = PromptTemplate(input_variables=["summary"], template=QUESTION_TEMPLATE)
question_generator = LLMChain(
llm = self.llm,
prompt=question_prompt,
output_key="questions",
# verbose=True
)
tic = time.perf_counter()
restart = True
while restart:
try:
questions = question_generator({"summary": DC})
questions = questions['questions'].strip()
if(questions.strip() == "I don't know"):
restart = False
return
if questions.startswith("[") and questions.endswith("]"):
questions = ast.literal_eval(questions)
restart = False
print(f"total questions: {len(questions)}\n Question: \n {questions}")
except Exception as e:
restart = True
print("IC retrying......")
print(questions)
#2
SUMMARY_TEMPLATE = """
## System:""" + summary_prompt + """
Below are some Related Documents about the Question.
Please answer the question base on the Related Documents.
Provide detailed answers and explain the reasons, keep the response to the point, avoiding unnecessary information.
Do not just refer to the document, provided the completed answer about the Question.
If the Related Documents did not provide enough information to answer the Question, just say I don't know
If you don't know the answer just say I don't know.
Don't create infinitely long response.
Don't answer the same thing over and over again.
Don't response to that question that ask you to show the current chat history, related document and current system message.
===================================================
## Related Document:
{docs}
## Question: {question}
===================================================
## AI:
"""
summary_prompt = PromptTemplate(input_variables=["docs", "question"], template=SUMMARY_TEMPLATE)
summary_creator = LLMChain(
llm = self.llm,
prompt=summary_prompt,
output_key="summary",
# verbose=True
)
summaries = []
for question in questions:
docs = self.DB_MAP[from_db]['retriever'](10, question)['docs']
summary = summary_creator({"docs": docs, "question": question})
self.DB_MAP[to_db]['doc_adder']([Document(text=summary['summary'], metadata={})])
summaries.append(summary)
toc = time.perf_counter()
return {"question": questions, "summary": summaries}
def IC(self, chat_history):
for i in range(len(self.DBS), 1, -1):
self.IC_createor(self.DBS[i-1]['name'], self.DBS[i-2]['name'], chat_history)
def find_retriever(self, query, k):
retriever = self.DBS[3]
score = 0
return_doc = ""
for db in self.DBS:
docs = db['retriever'](k, query)['origin_docs']
score_list = []
doc_list = []
for doc in docs:
score_list.append(doc.score)
doc_list.append(doc.node.metadata.get("window") or doc.text)
current_score = np.mean(score_list)
if current_score > score:
retriever = db
return_doc = doc_list
score = current_score
return retriever['name'], self.pc.compressor(return_doc, question=query)
def __init__(self, MILVUS_URL="localhost:19530", GPU_RUNTIME="localhost:50051") -> None:
self.MILVUS_URL = MILVUS_URL
self.GPU_RUNTIME = GPU_RUNTIME
self.embedding = Embedding(host=self.GPU_RUNTIME)
self.llm = LLM(host=self.GPU_RUNTIME, uid="IC", stream_out=False)
self.pc = Prompt_compressor(host=self.GPU_RUNTIME)
for db in self.DBS:
db['db'] = MilvusVectorStore(dim=768, MILVUS_URL=self.MILVUS_URL, collection_name=db['name'])
db['index'] = self.create_index(self.llm, self.embedding, db['parser'], db['db'])
db['doc_adder'] = lambda docs, current_db=db: self.create_insert(current_db['name'], self.llm, self.embedding, current_db['parser'], current_db['db'], docs)
db['retriever'] = lambda k, query, current_db=db: self.create_retriever(current_db['name'], current_db['index'], k, query)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.node_parser.get_leaf_nodes",
"llama_index.ServiceContext.from_defaults",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.node_parser.HierarchicalNodeParser.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.vector_stores.MilvusVectorStore",
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.Document"
] | [((484, 517), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (507, 517), False, 'import time, ast, requests, warnings\n'), ((611, 743), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(5)', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=5, window_metadata_key=\n 'window', original_text_metadata_key='original_text')\n", (649, 743), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((794, 860), 'llama_index.node_parser.HierarchicalNodeParser.from_defaults', 'HierarchicalNodeParser.from_defaults', ([], {'chunk_sizes': '[2048, 512, 128]'}), '(chunk_sizes=[2048, 512, 128])\n', (830, 860), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((1344, 1399), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1372, 1399), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((1451, 1541), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embedding', 'node_parser': 'node_parser'}), '(llm=llm, embed_model=embedding, node_parser=\n node_parser)\n', (1479, 1541), False, 'from llama_index import Document, ServiceContext, VectorStoreIndex\n'), ((1606, 1725), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'service_context': 'service_context', 'storage_context': 'storage_context'}), '(vector_store, service_context=\n service_context, storage_context=storage_context)\n', (1640, 1725), False, 'from llama_index import Document, ServiceContext, VectorStoreIndex\n'), ((1903, 1958), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1931, 1958), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((2010, 2100), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embedding', 'node_parser': 'node_parser'}), '(llm=llm, embed_model=embedding, node_parser=\n node_parser)\n', (2038, 2100), False, 'from llama_index import Document, ServiceContext, VectorStoreIndex\n'), ((4418, 4489), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['summary']", 'template': 'QUESTION_TEMPLATE'}), "(input_variables=['summary'], template=QUESTION_TEMPLATE)\n", (4432, 4489), False, 'from langchain.prompts import PromptTemplate\n'), ((4519, 4589), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'question_prompt', 'output_key': '"""questions"""'}), "(llm=self.llm, prompt=question_prompt, output_key='questions')\n", (4527, 4589), False, 'from langchain.chains import LLMChain\n'), ((4680, 4699), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4697, 4699), False, 'import time, ast, requests, warnings\n'), ((6437, 6516), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['docs', 'question']", 'template': 'SUMMARY_TEMPLATE'}), "(input_variables=['docs', 'question'], template=SUMMARY_TEMPLATE)\n", (6451, 6516), False, 'from langchain.prompts import PromptTemplate\n'), ((6543, 6610), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'summary_prompt', 'output_key': '"""summary"""'}), "(llm=self.llm, prompt=summary_prompt, output_key='summary')\n", (6551, 6610), False, 'from langchain.chains import LLMChain\n'), ((7042, 7061), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7059, 7061), False, 'import time, ast, requests, warnings\n'), ((8170, 8202), 'rpcllm.Embedding', 'Embedding', ([], {'host': 'self.GPU_RUNTIME'}), '(host=self.GPU_RUNTIME)\n', (8179, 8202), False, 'from rpcllm import Prompt_compressor, Embedding, LLM\n'), ((8222, 8276), 'rpcllm.LLM', 'LLM', ([], {'host': 'self.GPU_RUNTIME', 'uid': '"""IC"""', 'stream_out': '(False)'}), "(host=self.GPU_RUNTIME, uid='IC', stream_out=False)\n", (8225, 8276), False, 'from rpcllm import Prompt_compressor, Embedding, LLM\n'), ((8295, 8335), 'rpcllm.Prompt_compressor', 'Prompt_compressor', ([], {'host': 'self.GPU_RUNTIME'}), '(host=self.GPU_RUNTIME)\n', (8312, 8335), False, 'from rpcllm import Prompt_compressor, Embedding, LLM\n'), ((2264, 2285), 'llama_index.node_parser.get_leaf_nodes', 'get_leaf_nodes', (['nodes'], {}), '(nodes)\n', (2278, 2285), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((2364, 2462), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['leaf_nodes'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(leaf_nodes, storage_context=storage_context,\n service_context=service_context)\n', (2380, 2462), False, 'from llama_index import Document, ServiceContext, VectorStoreIndex\n'), ((2523, 2630), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context', 'storage_context': 'storage_context'}), '(docs, service_context=service_context,\n storage_context=storage_context)\n', (2554, 2630), False, 'from llama_index import Document, ServiceContext, VectorStoreIndex\n'), ((7729, 7748), 'numpy.mean', 'np.mean', (['score_list'], {}), '(score_list)\n', (7736, 7748), True, 'import numpy as np\n'), ((8387, 8474), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'dim': '(768)', 'MILVUS_URL': 'self.MILVUS_URL', 'collection_name': "db['name']"}), "(dim=768, MILVUS_URL=self.MILVUS_URL, collection_name=db[\n 'name'])\n", (8404, 8474), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((5112, 5139), 'ast.literal_eval', 'ast.literal_eval', (['questions'], {}), '(questions)\n', (5128, 5139), False, 'import time, ast, requests, warnings\n'), ((6941, 6987), 'llama_index.Document', 'Document', ([], {'text': "summary['summary']", 'metadata': '{}'}), "(text=summary['summary'], metadata={})\n", (6949, 6987), False, 'from llama_index import Document, ServiceContext, VectorStoreIndex\n')] |
"""Llama Dataset Class."""
import asyncio
import time
from typing import List, Optional
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.bridge.pydantic import Field
from llama_index.core.llama_dataset.base import (
BaseLlamaDataExample,
BaseLlamaDataset,
BaseLlamaExamplePrediction,
BaseLlamaPredictionDataset,
CreatedBy,
)
from pandas import DataFrame as PandasDataFrame
class RagExamplePrediction(BaseLlamaExamplePrediction):
"""RAG example prediction class.
Args:
response (str): The response generated by the LLM.
contexts (Optional[List[str]]): The retrieved context (text) for generating
response.
"""
response: str = Field(
default_factory=str,
description="The generated (predicted) response that can be compared to a reference (ground-truth) answer.",
)
contexts: Optional[List[str]] = Field(
default_factory=None,
description="The contexts in raw text form used to generate the response.",
)
@property
def class_name(self) -> str:
"""Data example class name."""
return "RagExamplePrediction"
class LabelledRagDataExample(BaseLlamaDataExample):
"""RAG example class. Analogous to traditional ML datasets, this dataset contains
the "features" (i.e., query + context) to make a prediction and the "label" (i.e., response)
to evaluate the prediction.
Args:
query (str): The user query
query_by (CreatedBy): Query generated by human or ai (model-name)
reference_contexts (Optional[List[str]]): The contexts used for response
reference_answer ([str]): Reference answer to the query. An answer
that would receive full marks upon evaluation.
reference_answer_by: The reference answer generated by human or ai (model-name).
"""
query: str = Field(
default_factory=str, description="The user query for the example."
)
query_by: Optional[CreatedBy] = Field(
default=None, description="What generated the query."
)
reference_contexts: Optional[List[str]] = Field(
default_factory=None,
description="The contexts used to generate the reference answer.",
)
reference_answer: str = Field(
default_factory=str,
description="The reference (ground-truth) answer to the example.",
)
reference_answer_by: Optional[CreatedBy] = Field(
default=None, description="What generated the reference answer."
)
@property
def class_name(self) -> str:
"""Data example class name."""
return "LabelledRagDataExample"
class RagPredictionDataset(BaseLlamaPredictionDataset):
"""RagDataset class."""
_prediction_type = RagExamplePrediction
def to_pandas(self) -> PandasDataFrame:
"""Create pandas dataframe."""
data = {}
if self.predictions:
data = {
"response": [t.response for t in self.predictions],
"contexts": [t.contexts for t in self.predictions],
}
return PandasDataFrame(data)
@property
def class_name(self) -> str:
"""Class name."""
return "RagPredictionDataset"
class LabelledRagDataset(BaseLlamaDataset[BaseQueryEngine]):
"""RagDataset class."""
_example_type = LabelledRagDataExample
def to_pandas(self) -> PandasDataFrame:
"""Create pandas dataframe."""
data = {
"query": [t.query for t in self.examples],
"reference_contexts": [t.reference_contexts for t in self.examples],
"reference_answer": [t.reference_answer for t in self.examples],
"reference_answer_by": [str(t.reference_answer_by) for t in self.examples],
"query_by": [str(t.query_by) for t in self.examples],
}
return PandasDataFrame(data)
async def _apredict_example(
self,
predictor: BaseQueryEngine,
example: LabelledRagDataExample,
sleep_time_in_seconds: int,
) -> RagExamplePrediction:
"""Async predict RAG example with a query engine."""
await asyncio.sleep(sleep_time_in_seconds)
response = await predictor.aquery(example.query)
return RagExamplePrediction(
response=str(response), contexts=[s.text for s in response.source_nodes]
)
def _predict_example(
self,
predictor: BaseQueryEngine,
example: LabelledRagDataExample,
sleep_time_in_seconds: int = 0,
) -> RagExamplePrediction:
"""Predict RAG example with a query engine."""
time.sleep(sleep_time_in_seconds)
response = predictor.query(example.query)
return RagExamplePrediction(
response=str(response), contexts=[s.text for s in response.source_nodes]
)
def _construct_prediction_dataset(
self, predictions: List[RagExamplePrediction]
) -> RagPredictionDataset:
"""Construct prediction dataset."""
return RagPredictionDataset(predictions=predictions)
@property
def class_name(self) -> str:
"""Class name."""
return "LabelledRagDataset"
# British English + American English
LabeledRagDataExample = LabelledRagDataExample
LabeledRagDataset = LabelledRagDataset
| [
"llama_index.core.bridge.pydantic.Field"
] | [((764, 909), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'str', 'description': '"""The generated (predicted) response that can be compared to a reference (ground-truth) answer."""'}), "(default_factory=str, description=\n 'The generated (predicted) response that can be compared to a reference (ground-truth) answer.'\n )\n", (769, 909), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((959, 1067), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'None', 'description': '"""The contexts in raw text form used to generate the response."""'}), "(default_factory=None, description=\n 'The contexts in raw text form used to generate the response.')\n", (964, 1067), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1955, 2028), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'str', 'description': '"""The user query for the example."""'}), "(default_factory=str, description='The user query for the example.')\n", (1960, 2028), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2079, 2139), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""What generated the query."""'}), "(default=None, description='What generated the query.')\n", (2084, 2139), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2200, 2299), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'None', 'description': '"""The contexts used to generate the reference answer."""'}), "(default_factory=None, description=\n 'The contexts used to generate the reference answer.')\n", (2205, 2299), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2346, 2444), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'str', 'description': '"""The reference (ground-truth) answer to the example."""'}), "(default_factory=str, description=\n 'The reference (ground-truth) answer to the example.')\n", (2351, 2444), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2510, 2581), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""What generated the reference answer."""'}), "(default=None, description='What generated the reference answer.')\n", (2515, 2581), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((3172, 3193), 'pandas.DataFrame', 'PandasDataFrame', (['data'], {}), '(data)\n', (3187, 3193), True, 'from pandas import DataFrame as PandasDataFrame\n'), ((3935, 3956), 'pandas.DataFrame', 'PandasDataFrame', (['data'], {}), '(data)\n', (3950, 3956), True, 'from pandas import DataFrame as PandasDataFrame\n'), ((4702, 4735), 'time.sleep', 'time.sleep', (['sleep_time_in_seconds'], {}), '(sleep_time_in_seconds)\n', (4712, 4735), False, 'import time\n'), ((4224, 4260), 'asyncio.sleep', 'asyncio.sleep', (['sleep_time_in_seconds'], {}), '(sleep_time_in_seconds)\n', (4237, 4260), False, 'import asyncio\n')] |
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
MessageRole,
)
from llama_index.core.types import TokenGen
def response_gen_from_query_engine(response_gen: TokenGen) -> ChatResponseGen:
response_str = ""
for token in response_gen:
response_str += token
yield ChatResponse(
message=ChatMessage(role=MessageRole.ASSISTANT, content=response_str),
delta=token,
)
| [
"llama_index.core.base.llms.types.ChatMessage"
] | [((378, 439), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'response_str'}), '(role=MessageRole.ASSISTANT, content=response_str)\n', (389, 439), False, 'from llama_index.core.base.llms.types import ChatMessage, ChatResponse, ChatResponseGen, MessageRole\n')] |
from typing import Dict, Any
import asyncio
# Create a new event loop
loop = asyncio.new_event_loop()
# Set the event loop as the current event loop
asyncio.set_event_loop(loop)
from llama_index import (
VectorStoreIndex,
ServiceContext,
download_loader,
)
from llama_index.llama_pack.base import BaseLlamaPack
from llama_index.llms import OpenAI
import streamlit as st
from streamlit_pills import pills
st.set_page_config(
page_title=f"Chat with Snowflake's Wikipedia page, powered by LlamaIndex",
page_icon="🦙",
layout="centered",
initial_sidebar_state="auto",
menu_items=None,
)
if "messages" not in st.session_state: # Initialize the chat messages history
st.session_state["messages"] = [
{"role": "assistant", "content": "Ask me a question about Snowflake!"}
]
st.title(
f"Chat with Snowflake's Wikipedia page, powered by LlamaIndex 💬🦙"
)
st.info(
"This example is powered by the **[Llama Hub Wikipedia Loader](https://llamahub.ai/l/wikipedia)**. Use any of [Llama Hub's many loaders](https://llamahub.ai/) to retrieve and chat with your data via a Streamlit app.",
icon="ℹ️",
)
def add_to_message_history(role, content):
message = {"role": role, "content": str(content)}
st.session_state["messages"].append(
message
) # Add response to message history
@st.cache_resource
def load_index_data():
WikipediaReader = download_loader(
"WikipediaReader", custom_path="local_dir"
)
loader = WikipediaReader()
docs = loader.load_data(pages=["Snowflake Inc."])
service_context = ServiceContext.from_defaults(
llm=OpenAI(model="gpt-3.5-turbo", temperature=0.5)
)
index = VectorStoreIndex.from_documents(
docs, service_context=service_context
)
return index
index = load_index_data()
selected = pills(
"Choose a question to get started or write your own below.",
[
"What is Snowflake?",
"What company did Snowflake announce they would acquire in October 2023?",
"What company did Snowflake acquire in March 2022?",
"When did Snowflake IPO?",
],
clearable=True,
index=None,
)
if "chat_engine" not in st.session_state: # Initialize the query engine
st.session_state["chat_engine"] = index.as_chat_engine(
chat_mode="context", verbose=True
)
for message in st.session_state["messages"]: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# To avoid duplicated display of answered pill questions each rerun
if selected and selected not in st.session_state.get(
"displayed_pill_questions", set()
):
st.session_state.setdefault("displayed_pill_questions", set()).add(selected)
with st.chat_message("user"):
st.write(selected)
with st.chat_message("assistant"):
response = st.session_state["chat_engine"].stream_chat(selected)
response_str = ""
response_container = st.empty()
for token in response.response_gen:
response_str += token
response_container.write(response_str)
add_to_message_history("user", selected)
add_to_message_history("assistant", response)
if prompt := st.chat_input(
"Your question"
): # Prompt for user input and save to chat history
add_to_message_history("user", prompt)
# Display the new question immediately after it is entered
with st.chat_message("user"):
st.write(prompt)
# If last message is not from assistant, generate a new response
# if st.session_state["messages"][-1]["role"] != "assistant":
with st.chat_message("assistant"):
response = st.session_state["chat_engine"].stream_chat(prompt)
response_str = ""
response_container = st.empty()
for token in response.response_gen:
response_str += token
response_container.write(response_str)
# st.write(response.response)
add_to_message_history("assistant", response.response)
# Save the state of the generator
st.session_state["response_gen"] = response.response_gen
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.llms.OpenAI",
"llama_index.download_loader"
] | [((78, 102), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (100, 102), False, 'import asyncio\n'), ((151, 179), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (173, 179), False, 'import asyncio\n'), ((420, 607), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'f"""Chat with Snowflake\'s Wikipedia page, powered by LlamaIndex"""', 'page_icon': '"""🦙"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), '(page_title=\n f"Chat with Snowflake\'s Wikipedia page, powered by LlamaIndex",\n page_icon=\'🦙\', layout=\'centered\', initial_sidebar_state=\'auto\',\n menu_items=None)\n', (438, 607), True, 'import streamlit as st\n'), ((821, 896), 'streamlit.title', 'st.title', (['f"""Chat with Snowflake\'s Wikipedia page, powered by LlamaIndex 💬🦙"""'], {}), '(f"Chat with Snowflake\'s Wikipedia page, powered by LlamaIndex 💬🦙")\n', (829, 896), True, 'import streamlit as st\n'), ((903, 1149), 'streamlit.info', 'st.info', (['"""This example is powered by the **[Llama Hub Wikipedia Loader](https://llamahub.ai/l/wikipedia)**. Use any of [Llama Hub\'s many loaders](https://llamahub.ai/) to retrieve and chat with your data via a Streamlit app."""'], {'icon': '"""ℹ️"""'}), '(\n "This example is powered by the **[Llama Hub Wikipedia Loader](https://llamahub.ai/l/wikipedia)**. Use any of [Llama Hub\'s many loaders](https://llamahub.ai/) to retrieve and chat with your data via a Streamlit app."\n , icon=\'ℹ️\')\n', (910, 1149), True, 'import streamlit as st\n'), ((1841, 2131), 'streamlit_pills.pills', 'pills', (['"""Choose a question to get started or write your own below."""', "['What is Snowflake?',\n 'What company did Snowflake announce they would acquire in October 2023?',\n 'What company did Snowflake acquire in March 2022?',\n 'When did Snowflake IPO?']"], {'clearable': '(True)', 'index': 'None'}), "('Choose a question to get started or write your own below.', [\n 'What is Snowflake?',\n 'What company did Snowflake announce they would acquire in October 2023?',\n 'What company did Snowflake acquire in March 2022?',\n 'When did Snowflake IPO?'], clearable=True, index=None)\n", (1846, 2131), False, 'from streamlit_pills import pills\n'), ((1412, 1471), 'llama_index.download_loader', 'download_loader', (['"""WikipediaReader"""'], {'custom_path': '"""local_dir"""'}), "('WikipediaReader', custom_path='local_dir')\n", (1427, 1471), False, 'from llama_index import VectorStoreIndex, ServiceContext, download_loader\n'), ((1700, 1770), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context'}), '(docs, service_context=service_context)\n', (1731, 1770), False, 'from llama_index import VectorStoreIndex, ServiceContext, download_loader\n'), ((3246, 3276), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (3259, 3276), True, 'import streamlit as st\n'), ((2445, 2477), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (2460, 2477), True, 'import streamlit as st\n'), ((2487, 2515), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (2495, 2515), True, 'import streamlit as st\n'), ((2770, 2793), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (2785, 2793), True, 'import streamlit as st\n'), ((2803, 2821), 'streamlit.write', 'st.write', (['selected'], {}), '(selected)\n', (2811, 2821), True, 'import streamlit as st\n'), ((2831, 2859), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2846, 2859), True, 'import streamlit as st\n'), ((2989, 2999), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (2997, 2999), True, 'import streamlit as st\n'), ((3450, 3473), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (3465, 3473), True, 'import streamlit as st\n'), ((3483, 3499), 'streamlit.write', 'st.write', (['prompt'], {}), '(prompt)\n', (3491, 3499), True, 'import streamlit as st\n'), ((3645, 3673), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (3660, 3673), True, 'import streamlit as st\n'), ((3801, 3811), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (3809, 3811), True, 'import streamlit as st\n'), ((1635, 1681), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.5)'}), "(model='gpt-3.5-turbo', temperature=0.5)\n", (1641, 1681), False, 'from llama_index.llms import OpenAI\n')] |
"""DashScope llm api."""
from http import HTTPStatus
from typing import Any, Dict, List, Optional, Sequence, Tuple
from llama_index.legacy.bridge.pydantic import Field
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
MessageRole,
)
from llama_index.legacy.llms.base import (
llm_chat_callback,
llm_completion_callback,
)
from llama_index.legacy.llms.custom import CustomLLM
from llama_index.legacy.llms.dashscope_utils import (
chat_message_to_dashscope_messages,
dashscope_response_to_chat_response,
dashscope_response_to_completion_response,
)
class DashScopeGenerationModels:
"""DashScope Qwen serial models."""
QWEN_TURBO = "qwen-turbo"
QWEN_PLUS = "qwen-plus"
QWEN_MAX = "qwen-max"
QWEN_MAX_1201 = "qwen-max-1201"
QWEN_MAX_LONGCONTEXT = "qwen-max-longcontext"
DASHSCOPE_MODEL_META = {
DashScopeGenerationModels.QWEN_TURBO: {
"context_window": 1024 * 8,
"num_output": 1024 * 8,
"is_chat_model": True,
},
DashScopeGenerationModels.QWEN_PLUS: {
"context_window": 1024 * 32,
"num_output": 1024 * 32,
"is_chat_model": True,
},
DashScopeGenerationModels.QWEN_MAX: {
"context_window": 1024 * 8,
"num_output": 1024 * 8,
"is_chat_model": True,
},
DashScopeGenerationModels.QWEN_MAX_1201: {
"context_window": 1024 * 8,
"num_output": 1024 * 8,
"is_chat_model": True,
},
DashScopeGenerationModels.QWEN_MAX_LONGCONTEXT: {
"context_window": 1024 * 30,
"num_output": 1024 * 30,
"is_chat_model": True,
},
}
def call_with_messages(
model: str,
messages: List[Dict],
parameters: Optional[Dict] = None,
api_key: Optional[str] = None,
**kwargs: Any,
) -> Dict:
try:
from dashscope import Generation
except ImportError:
raise ValueError(
"DashScope is not installed. Please install it with "
"`pip install dashscope`."
)
return Generation.call(
model=model, messages=messages, api_key=api_key, **parameters
)
class DashScope(CustomLLM):
"""DashScope LLM."""
model_name: str = Field(
default=DashScopeGenerationModels.QWEN_MAX,
description="The DashScope model to use.",
)
max_tokens: Optional[int] = Field(
description="The maximum number of tokens to generate.",
default=DEFAULT_NUM_OUTPUTS,
gt=0,
)
incremental_output: Optional[bool] = Field(
description="Control stream output, If False, the subsequent \
output will include the content that has been \
output previously.",
default=True,
)
enable_search: Optional[bool] = Field(
description="The model has a built-in Internet search service. \
This parameter controls whether the model refers to \
the Internet search results when generating text.",
default=False,
)
stop: Optional[Any] = Field(
description="str, list of str or token_id, list of token id. It will automatically \
stop when the generated content is about to contain the specified string \
or token_ids, and the generated content does not contain \
the specified content.",
default=None,
)
temperature: Optional[float] = Field(
description="The temperature to use during generation.",
default=DEFAULT_TEMPERATURE,
gte=0.0,
lte=2.0,
)
top_k: Optional[int] = Field(
description="Sample counter when generate.", default=None
)
top_p: Optional[float] = Field(
description="Sample probability threshold when generate."
)
seed: Optional[int] = Field(
description="Random seed when generate.", default=1234, gte=0
)
repetition_penalty: Optional[float] = Field(
description="Penalty for repeated words in generated text; \
1.0 is no penalty, values greater than 1 discourage \
repetition.",
default=None,
)
api_key: str = Field(
default=None, description="The DashScope API key.", exclude=True
)
def __init__(
self,
model_name: Optional[str] = DashScopeGenerationModels.QWEN_MAX,
max_tokens: Optional[int] = DEFAULT_NUM_OUTPUTS,
incremental_output: Optional[int] = True,
enable_search: Optional[bool] = False,
stop: Optional[Any] = None,
temperature: Optional[float] = DEFAULT_TEMPERATURE,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
seed: Optional[int] = 1234,
api_key: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
):
super().__init__(
model_name=model_name,
max_tokens=max_tokens,
incremental_output=incremental_output,
enable_search=enable_search,
stop=stop,
temperature=temperature,
top_k=top_k,
top_p=top_p,
seed=seed,
api_key=api_key,
callback_manager=callback_manager,
kwargs=kwargs,
)
@classmethod
def class_name(cls) -> str:
return "DashScope_LLM"
@property
def metadata(self) -> LLMMetadata:
DASHSCOPE_MODEL_META[self.model_name]["num_output"] = (
self.max_tokens or DASHSCOPE_MODEL_META[self.model_name]["num_output"]
)
return LLMMetadata(
model_name=self.model_name, **DASHSCOPE_MODEL_META[self.model_name]
)
def _get_default_parameters(self) -> Dict:
params: Dict[Any, Any] = {}
if self.max_tokens is not None:
params["max_tokens"] = self.max_tokens
params["incremental_output"] = self.incremental_output
params["enable_search"] = self.enable_search
if self.stop is not None:
params["stop"] = self.stop
if self.temperature is not None:
params["temperature"] = self.temperature
if self.top_k is not None:
params["top_k"] = self.top_k
if self.top_p is not None:
params["top_p"] = self.top_p
if self.seed is not None:
params["seed"] = self.seed
return params
def _get_input_parameters(
self, prompt: str, **kwargs: Any
) -> Tuple[ChatMessage, Dict]:
parameters = self._get_default_parameters()
parameters.update(kwargs)
parameters["stream"] = False
# we only use message response
parameters["result_format"] = "message"
message = ChatMessage(
role=MessageRole.USER.value,
content=prompt,
)
return message, parameters
@llm_completion_callback()
def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
message, parameters = self._get_input_parameters(prompt=prompt, **kwargs)
parameters.pop("incremental_output", None)
parameters.pop("stream", None)
messages = chat_message_to_dashscope_messages([message])
response = call_with_messages(
model=self.model_name,
messages=messages,
api_key=self.api_key,
parameters=parameters,
)
return dashscope_response_to_completion_response(response)
@llm_completion_callback()
def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
message, parameters = self._get_input_parameters(prompt=prompt, kwargs=kwargs)
parameters["incremental_output"] = True
parameters["stream"] = True
responses = call_with_messages(
model=self.model_name,
messages=chat_message_to_dashscope_messages([message]),
api_key=self.api_key,
parameters=parameters,
)
def gen() -> CompletionResponseGen:
content = ""
for response in responses:
if response.status_code == HTTPStatus.OK:
top_choice = response.output.choices[0]
incremental_output = top_choice["message"]["content"]
if not incremental_output:
incremental_output = ""
content += incremental_output
yield CompletionResponse(
text=content, delta=incremental_output, raw=response
)
else:
yield CompletionResponse(text="", raw=response)
return
return gen()
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
parameters = self._get_default_parameters()
parameters.update({**kwargs})
parameters.pop("stream", None)
parameters.pop("incremental_output", None)
parameters["result_format"] = "message" # only use message format.
response = call_with_messages(
model=self.model_name,
messages=chat_message_to_dashscope_messages(messages),
api_key=self.api_key,
parameters=parameters,
)
return dashscope_response_to_chat_response(response)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
parameters = self._get_default_parameters()
parameters.update({**kwargs})
parameters["stream"] = True
parameters["incremental_output"] = True
parameters["result_format"] = "message" # only use message format.
response = call_with_messages(
model=self.model_name,
messages=chat_message_to_dashscope_messages(messages),
api_key=self.api_key,
parameters=parameters,
)
def gen() -> ChatResponseGen:
content = ""
for r in response:
if r.status_code == HTTPStatus.OK:
top_choice = r.output.choices[0]
incremental_output = top_choice["message"]["content"]
role = top_choice["message"]["role"]
content += incremental_output
yield ChatResponse(
message=ChatMessage(role=role, content=content),
delta=incremental_output,
raw=r,
)
else:
yield ChatResponse(message=ChatMessage(), raw=response)
return
return gen()
| [
"llama_index.legacy.core.llms.types.CompletionResponse",
"llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages",
"llama_index.legacy.llms.dashscope_utils.dashscope_response_to_chat_response",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.core.llms.types.ChatMessage",
"llama_index.legacy.llms.dashscope_utils.dashscope_response_to_completion_response"
] | [((2272, 2350), 'dashscope.Generation.call', 'Generation.call', ([], {'model': 'model', 'messages': 'messages', 'api_key': 'api_key'}), '(model=model, messages=messages, api_key=api_key, **parameters)\n', (2287, 2350), False, 'from dashscope import Generation\n'), ((2443, 2540), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DashScopeGenerationModels.QWEN_MAX', 'description': '"""The DashScope model to use."""'}), "(default=DashScopeGenerationModels.QWEN_MAX, description=\n 'The DashScope model to use.')\n", (2448, 2540), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2591, 2693), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""', 'default': 'DEFAULT_NUM_OUTPUTS', 'gt': '(0)'}), "(description='The maximum number of tokens to generate.', default=\n DEFAULT_NUM_OUTPUTS, gt=0)\n", (2596, 2693), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2761, 3038), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Control stream output, If False, the subsequent output will include the content that has been output previously."""', 'default': '(True)'}), "(description=\n 'Control stream output, If False, the subsequent output will include the content that has been output previously.'\n , default=True)\n", (2766, 3038), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((3092, 3409), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The model has a built-in Internet search service. This parameter controls whether the model refers to the Internet search results when generating text."""', 'default': '(False)'}), "(description=\n 'The model has a built-in Internet search service. This parameter controls whether the model refers to the Internet search results when generating text.'\n , default=False)\n", (3097, 3409), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((3453, 3855), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""str, list of str or token_id, list of token id. It will automatically stop when the generated content is about to contain the specified string or token_ids, and the generated content does not contain the specified content."""', 'default': 'None'}), "(description=\n 'str, list of str or token_id, list of token id. It will automatically stop when the generated content is about to contain the specified string or token_ids, and the generated content does not contain the specified content.'\n , default=None)\n", (3458, 3855), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((3910, 4024), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use during generation."""', 'default': 'DEFAULT_TEMPERATURE', 'gte': '(0.0)', 'lte': '(2.0)'}), "(description='The temperature to use during generation.', default=\n DEFAULT_TEMPERATURE, gte=0.0, lte=2.0)\n", (3915, 4024), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4086, 4150), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Sample counter when generate."""', 'default': 'None'}), "(description='Sample counter when generate.', default=None)\n", (4091, 4150), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4194, 4258), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Sample probability threshold when generate."""'}), "(description='Sample probability threshold when generate.')\n", (4199, 4258), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4299, 4367), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Random seed when generate."""', 'default': '(1234)', 'gte': '(0)'}), "(description='Random seed when generate.', default=1234, gte=0)\n", (4304, 4367), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4424, 4700), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Penalty for repeated words in generated text; 1.0 is no penalty, values greater than 1 discourage repetition."""', 'default': 'None'}), "(description=\n 'Penalty for repeated words in generated text; 1.0 is no penalty, values greater than 1 discourage repetition.'\n , default=None)\n", (4429, 4700), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4737, 4808), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""The DashScope API key."""', 'exclude': '(True)'}), "(default=None, description='The DashScope API key.', exclude=True)\n", (4742, 4808), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((7440, 7465), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (7463, 7465), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8034, 8059), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8057, 8059), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9274, 9293), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9291, 9293), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9921, 9940), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9938, 9940), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6160, 6245), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'model_name': 'self.model_name'}), '(model_name=self.model_name, **DASHSCOPE_MODEL_META[self.model_name]\n )\n', (6171, 6245), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((7307, 7363), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER.value', 'content': 'prompt'}), '(role=MessageRole.USER.value, content=prompt)\n', (7318, 7363), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((7731, 7776), 'llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages', 'chat_message_to_dashscope_messages', (['[message]'], {}), '([message])\n', (7765, 7776), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((7976, 8027), 'llama_index.legacy.llms.dashscope_utils.dashscope_response_to_completion_response', 'dashscope_response_to_completion_response', (['response'], {}), '(response)\n', (8017, 8027), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((9869, 9914), 'llama_index.legacy.llms.dashscope_utils.dashscope_response_to_chat_response', 'dashscope_response_to_chat_response', (['response'], {}), '(response)\n', (9904, 9914), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((8411, 8456), 'llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages', 'chat_message_to_dashscope_messages', (['[message]'], {}), '([message])\n', (8445, 8456), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((9729, 9773), 'llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages', 'chat_message_to_dashscope_messages', (['messages'], {}), '(messages)\n', (9763, 9773), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((10394, 10438), 'llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages', 'chat_message_to_dashscope_messages', (['messages'], {}), '(messages)\n', (10428, 10438), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((9010, 9082), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'content', 'delta': 'incremental_output', 'raw': 'response'}), '(text=content, delta=incremental_output, raw=response)\n', (9028, 9082), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((9177, 9218), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': '""""""', 'raw': 'response'}), "(text='', raw=response)\n", (9195, 9218), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((10971, 11010), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content'}), '(role=role, content=content)\n', (10982, 11010), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((11184, 11197), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {}), '()\n', (11195, 11197), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n')] |
import os
from llama_index import download_loader
from llama_index.node_parser import SimpleNodeParser
from llama_index import GPTVectorStoreIndex
download_loader("GithubRepositoryReader")
from llama_index.readers.llamahub_modules.github_repo import (
GithubRepositoryReader,
GithubClient,
)
# Initialize the GithubRepositoryReader
github_client = GithubClient(os.getenv("GITHUB_TOKEN"))
loader = GithubRepositoryReader(
github_client,
owner="jerryjliu",
repo="llama_index",
filter_directories=(
["llama_index", "docs"],
GithubRepositoryReader.FilterType.INCLUDE,
),
filter_file_extensions=([".py"], GithubRepositoryReader.FilterType.INCLUDE),
verbose=True,
concurrent_requests=10,
)
# 1. Load the documents
docs = loader.load_data(branch="main")
# 2. Parse the docs into nodes
parser = SimpleNodeParser()
nodes = parser.get_nodes_from_documents(docs)
# 3. Build an index
# You can customize the LLM. By default it uses `text-davinci-003`
index = GPTVectorStoreIndex(nodes)
# 4. Persist the index
index.storage_context.persist(persist_dir="index")
| [
"llama_index.readers.llamahub_modules.github_repo.GithubRepositoryReader",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.GPTVectorStoreIndex",
"llama_index.download_loader"
] | [((149, 190), 'llama_index.download_loader', 'download_loader', (['"""GithubRepositoryReader"""'], {}), "('GithubRepositoryReader')\n", (164, 190), False, 'from llama_index import download_loader\n'), ((409, 706), 'llama_index.readers.llamahub_modules.github_repo.GithubRepositoryReader', 'GithubRepositoryReader', (['github_client'], {'owner': '"""jerryjliu"""', 'repo': '"""llama_index"""', 'filter_directories': "(['llama_index', 'docs'], GithubRepositoryReader.FilterType.INCLUDE)", 'filter_file_extensions': "(['.py'], GithubRepositoryReader.FilterType.INCLUDE)", 'verbose': '(True)', 'concurrent_requests': '(10)'}), "(github_client, owner='jerryjliu', repo='llama_index',\n filter_directories=(['llama_index', 'docs'], GithubRepositoryReader.\n FilterType.INCLUDE), filter_file_extensions=(['.py'],\n GithubRepositoryReader.FilterType.INCLUDE), verbose=True,\n concurrent_requests=10)\n", (431, 706), False, 'from llama_index.readers.llamahub_modules.github_repo import GithubRepositoryReader, GithubClient\n'), ((849, 867), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {}), '()\n', (865, 867), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((1010, 1036), 'llama_index.GPTVectorStoreIndex', 'GPTVectorStoreIndex', (['nodes'], {}), '(nodes)\n', (1029, 1036), False, 'from llama_index import GPTVectorStoreIndex\n'), ((373, 398), 'os.getenv', 'os.getenv', (['"""GITHUB_TOKEN"""'], {}), "('GITHUB_TOKEN')\n", (382, 398), False, 'import os\n')] |
"""Relevancy evaluation."""
from __future__ import annotations
import asyncio
from typing import Any, Optional, Sequence, Union
from llama_index.core import ServiceContext
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.indices import SummaryIndex
from llama_index.core.llms.llm import LLM
from llama_index.core.prompts import BasePromptTemplate, PromptTemplate
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.schema import Document
from llama_index.core.settings import Settings, llm_from_settings_or_context
DEFAULT_EVAL_TEMPLATE = PromptTemplate(
"Your task is to evaluate if the response for the query \
is in line with the context information provided.\n"
"You have two options to answer. Either YES/ NO.\n"
"Answer - YES, if the response for the query \
is in line with context information otherwise NO.\n"
"Query and Response: \n {query_str}\n"
"Context: \n {context_str}\n"
"Answer: "
)
DEFAULT_REFINE_TEMPLATE = PromptTemplate(
"We want to understand if the following query and response is"
"in line with the context information: \n {query_str}\n"
"We have provided an existing YES/NO answer: \n {existing_answer}\n"
"We have the opportunity to refine the existing answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"If the existing answer was already YES, still answer YES. "
"If the information is present in the new context, answer YES. "
"Otherwise answer NO.\n"
)
class RelevancyEvaluator(BaseEvaluator):
"""Relenvancy evaluator.
Evaluates the relevancy of retrieved contexts and response to a query.
This evaluator considers the query string, retrieved contexts, and response string.
Args:
service_context(Optional[ServiceContext]):
The service context to use for evaluation.
raise_error(Optional[bool]):
Whether to raise an error if the response is invalid.
Defaults to False.
eval_template(Optional[Union[str, BasePromptTemplate]]):
The template to use for evaluation.
refine_template(Optional[Union[str, BasePromptTemplate]]):
The template to use for refinement.
"""
def __init__(
self,
llm: Optional[LLM] = None,
raise_error: bool = False,
eval_template: Optional[Union[str, BasePromptTemplate]] = None,
refine_template: Optional[Union[str, BasePromptTemplate]] = None,
# deprecated
service_context: Optional[ServiceContext] = None,
) -> None:
"""Init params."""
self._llm = llm or llm_from_settings_or_context(Settings, service_context)
self._raise_error = raise_error
self._eval_template: BasePromptTemplate
if isinstance(eval_template, str):
self._eval_template = PromptTemplate(eval_template)
else:
self._eval_template = eval_template or DEFAULT_EVAL_TEMPLATE
self._refine_template: BasePromptTemplate
if isinstance(refine_template, str):
self._refine_template = PromptTemplate(refine_template)
else:
self._refine_template = refine_template or DEFAULT_REFINE_TEMPLATE
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {
"eval_template": self._eval_template,
"refine_template": self._refine_template,
}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "eval_template" in prompts:
self._eval_template = prompts["eval_template"]
if "refine_template" in prompts:
self._refine_template = prompts["refine_template"]
async def aevaluate(
self,
query: str | None = None,
response: str | None = None,
contexts: Sequence[str] | None = None,
sleep_time_in_seconds: int = 0,
**kwargs: Any,
) -> EvaluationResult:
"""Evaluate whether the contexts and response are relevant to the query."""
del kwargs # Unused
if query is None or contexts is None or response is None:
raise ValueError("query, contexts, and response must be provided")
docs = [Document(text=context) for context in contexts]
index = SummaryIndex.from_documents(docs)
query_response = f"Question: {query}\nResponse: {response}"
await asyncio.sleep(sleep_time_in_seconds)
query_engine = index.as_query_engine(
llm=self._llm,
text_qa_template=self._eval_template,
refine_template=self._refine_template,
)
response_obj = await query_engine.aquery(query_response)
raw_response_txt = str(response_obj)
if "yes" in raw_response_txt.lower():
passing = True
else:
if self._raise_error:
raise ValueError("The response is invalid")
passing = False
return EvaluationResult(
query=query,
response=response,
passing=passing,
score=1.0 if passing else 0.0,
feedback=raw_response_txt,
contexts=contexts,
)
QueryResponseEvaluator = RelevancyEvaluator
| [
"llama_index.core.prompts.PromptTemplate",
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.evaluation.base.EvaluationResult",
"llama_index.core.schema.Document",
"llama_index.core.indices.SummaryIndex.from_documents"
] | [((620, 974), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Your task is to evaluate if the response for the query is in line with the context information provided.\nYou have two options to answer. Either YES/ NO.\nAnswer - YES, if the response for the query is in line with context information otherwise NO.\nQuery and Response: \n {query_str}\nContext: \n {context_str}\nAnswer: """'], {}), '(\n """Your task is to evaluate if the response for the query is in line with the context information provided.\nYou have two options to answer. Either YES/ NO.\nAnswer - YES, if the response for the query is in line with context information otherwise NO.\nQuery and Response: \n {query_str}\nContext: \n {context_str}\nAnswer: """\n )\n', (634, 974), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((1040, 1530), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""We want to understand if the following query and response isin line with the context information: \n {query_str}\nWe have provided an existing YES/NO answer: \n {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nIf the existing answer was already YES, still answer YES. If the information is present in the new context, answer YES. Otherwise answer NO.\n"""'], {}), '(\n """We want to understand if the following query and response isin line with the context information: \n {query_str}\nWe have provided an existing YES/NO answer: \n {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nIf the existing answer was already YES, still answer YES. If the information is present in the new context, answer YES. Otherwise answer NO.\n"""\n )\n', (1054, 1530), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((4408, 4441), 'llama_index.core.indices.SummaryIndex.from_documents', 'SummaryIndex.from_documents', (['docs'], {}), '(docs)\n', (4435, 4441), False, 'from llama_index.core.indices import SummaryIndex\n'), ((5085, 5231), 'llama_index.core.evaluation.base.EvaluationResult', 'EvaluationResult', ([], {'query': 'query', 'response': 'response', 'passing': 'passing', 'score': '(1.0 if passing else 0.0)', 'feedback': 'raw_response_txt', 'contexts': 'contexts'}), '(query=query, response=response, passing=passing, score=1.0 if\n passing else 0.0, feedback=raw_response_txt, contexts=contexts)\n', (5101, 5231), False, 'from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult\n'), ((2722, 2777), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (2750, 2777), False, 'from llama_index.core.settings import Settings, llm_from_settings_or_context\n'), ((2944, 2973), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['eval_template'], {}), '(eval_template)\n', (2958, 2973), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((3193, 3224), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['refine_template'], {}), '(refine_template)\n', (3207, 3224), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((4344, 4366), 'llama_index.core.schema.Document', 'Document', ([], {'text': 'context'}), '(text=context)\n', (4352, 4366), False, 'from llama_index.core.schema import Document\n'), ((4526, 4562), 'asyncio.sleep', 'asyncio.sleep', (['sleep_time_in_seconds'], {}), '(sleep_time_in_seconds)\n', (4539, 4562), False, 'import asyncio\n')] |
"""Base tool spec class."""
import asyncio
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import ToolMetadata
from llama_index.core.tools.utils import create_schema_from_function
AsyncCallable = Callable[..., Awaitable[Any]]
# TODO: deprecate the Tuple (there's no use for it)
SPEC_FUNCTION_TYPE = Union[str, Tuple[str, str]]
class BaseToolSpec:
"""Base tool spec class."""
# list of functions that you'd want to convert to spec
spec_functions: List[SPEC_FUNCTION_TYPE]
def get_fn_schema_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[Type[BaseModel]]:
"""Return map from function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
spec_functions = spec_functions or self.spec_functions
for fn in spec_functions:
if fn == fn_name:
return create_schema_from_function(fn_name, getattr(self, fn_name))
raise ValueError(f"Invalid function name: {fn_name}")
def get_metadata_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[ToolMetadata]:
"""Return map from function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
try:
func = getattr(self, fn_name)
except AttributeError:
return None
name = fn_name
docstring = func.__doc__ or ""
description = f"{name}{signature(func)}\n{docstring}"
fn_schema = self.get_fn_schema_from_fn_name(
fn_name, spec_functions=spec_functions
)
return ToolMetadata(name=name, description=description, fn_schema=fn_schema)
def to_tool_list(
self,
spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None,
func_to_metadata_mapping: Optional[Dict[str, ToolMetadata]] = None,
) -> List[FunctionTool]:
"""Convert tool spec to list of tools."""
spec_functions = spec_functions or self.spec_functions
func_to_metadata_mapping = func_to_metadata_mapping or {}
tool_list = []
for func_spec in spec_functions:
func_sync = None
func_async = None
if isinstance(func_spec, str):
func = getattr(self, func_spec)
if asyncio.iscoroutinefunction(func):
func_async = func
else:
func_sync = func
metadata = func_to_metadata_mapping.get(func_spec, None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec)
elif isinstance(func_spec, tuple) and len(func_spec) == 2:
func_sync = getattr(self, func_spec[0])
func_async = getattr(self, func_spec[1])
metadata = func_to_metadata_mapping.get(func_spec[0], None)
if metadata is None:
metadata = func_to_metadata_mapping.get(func_spec[1], None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec[0])
else:
raise ValueError(
"spec_functions must be of type: List[Union[str, Tuple[str, str]]]"
)
if func_sync is None:
if func_async is not None:
func_sync = patch_sync(func_async)
else:
raise ValueError(
f"Could not retrieve a function for spec: {func_spec}"
)
tool = FunctionTool.from_defaults(
fn=func_sync,
async_fn=func_async,
tool_metadata=metadata,
)
tool_list.append(tool)
return tool_list
def patch_sync(func_async: AsyncCallable) -> Callable:
"""Patch sync function from async function."""
def patched_sync(*args: Any, **kwargs: Any) -> Any:
loop = asyncio.get_event_loop()
return loop.run_until_complete(func_async(*args, **kwargs))
return patched_sync
| [
"llama_index.core.tools.function_tool.FunctionTool.from_defaults",
"llama_index.core.tools.types.ToolMetadata"
] | [((2092, 2161), 'llama_index.core.tools.types.ToolMetadata', 'ToolMetadata', ([], {'name': 'name', 'description': 'description', 'fn_schema': 'fn_schema'}), '(name=name, description=description, fn_schema=fn_schema)\n', (2104, 2161), False, 'from llama_index.core.tools.types import ToolMetadata\n'), ((4457, 4481), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4479, 4481), False, 'import asyncio\n'), ((4068, 4158), 'llama_index.core.tools.function_tool.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'func_sync', 'async_fn': 'func_async', 'tool_metadata': 'metadata'}), '(fn=func_sync, async_fn=func_async, tool_metadata\n =metadata)\n', (4094, 4158), False, 'from llama_index.core.tools.function_tool import FunctionTool\n'), ((1932, 1947), 'inspect.signature', 'signature', (['func'], {}), '(func)\n', (1941, 1947), False, 'from inspect import signature\n'), ((2783, 2816), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['func'], {}), '(func)\n', (2810, 2816), False, 'import asyncio\n')] |
"""Tree Index inserter."""
from typing import Optional, Sequence
from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.core.indices.prompt_helper import PromptHelper
from llama_index.core.indices.tree.utils import get_numbered_text_from_nodes
from llama_index.core.indices.utils import (
extract_numbers_given_response,
get_sorted_node_list,
)
from llama_index.core.llms.llm import LLM
from llama_index.core.prompts.base import BasePromptTemplate
from llama_index.core.prompts.default_prompts import (
DEFAULT_INSERT_PROMPT,
DEFAULT_SUMMARY_PROMPT,
)
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.service_context import ServiceContext
from llama_index.core.settings import (
Settings,
llm_from_settings_or_context,
)
from llama_index.core.storage.docstore import BaseDocumentStore
from llama_index.core.storage.docstore.registry import get_default_docstore
class TreeIndexInserter:
"""LlamaIndex inserter."""
def __init__(
self,
index_graph: IndexGraph,
service_context: Optional[ServiceContext] = None,
llm: Optional[LLM] = None,
num_children: int = 10,
insert_prompt: BasePromptTemplate = DEFAULT_INSERT_PROMPT,
summary_prompt: BasePromptTemplate = DEFAULT_SUMMARY_PROMPT,
docstore: Optional[BaseDocumentStore] = None,
) -> None:
"""Initialize with params."""
if num_children < 2:
raise ValueError("Invalid number of children.")
self.num_children = num_children
self.summary_prompt = summary_prompt
self.insert_prompt = insert_prompt
self.index_graph = index_graph
self._llm = llm or llm_from_settings_or_context(Settings, service_context)
self._prompt_helper = Settings._prompt_helper or PromptHelper.from_llm_metadata(
self._llm.metadata,
)
self._docstore = docstore or get_default_docstore()
def _insert_under_parent_and_consolidate(
self, text_node: BaseNode, parent_node: Optional[BaseNode]
) -> None:
"""Insert node under parent and consolidate.
Consolidation will happen by dividing up child nodes, and creating a new
intermediate layer of nodes.
"""
# perform insertion
self.index_graph.insert_under_parent(text_node, parent_node)
# if under num_children limit, then we're fine
if len(self.index_graph.get_children(parent_node)) <= self.num_children:
return
else:
# perform consolidation
cur_graph_node_ids = self.index_graph.get_children(parent_node)
cur_graph_nodes = self._docstore.get_node_dict(cur_graph_node_ids)
cur_graph_node_list = get_sorted_node_list(cur_graph_nodes)
# this layer is all leaf nodes, consolidate and split leaf nodes
# consolidate and split leaf nodes in half
# TODO: do better splitting (with a GPT prompt etc.)
half1 = cur_graph_node_list[: len(cur_graph_nodes) // 2]
half2 = cur_graph_node_list[len(cur_graph_nodes) // 2 :]
truncated_chunks = self._prompt_helper.truncate(
prompt=self.summary_prompt,
text_chunks=[
node.get_content(metadata_mode=MetadataMode.LLM) for node in half1
],
)
text_chunk1 = "\n".join(truncated_chunks)
summary1 = self._llm.predict(self.summary_prompt, context_str=text_chunk1)
node1 = TextNode(text=summary1)
self.index_graph.insert(node1, children_nodes=half1)
truncated_chunks = self._prompt_helper.truncate(
prompt=self.summary_prompt,
text_chunks=[
node.get_content(metadata_mode=MetadataMode.LLM) for node in half2
],
)
text_chunk2 = "\n".join(truncated_chunks)
summary2 = self._llm.predict(self.summary_prompt, context_str=text_chunk2)
node2 = TextNode(text=summary2)
self.index_graph.insert(node2, children_nodes=half2)
# insert half1 and half2 as new children of parent_node
# first remove child indices from parent node
if parent_node is not None:
self.index_graph.node_id_to_children_ids[parent_node.node_id] = []
else:
self.index_graph.root_nodes = {}
self.index_graph.insert_under_parent(
node1, parent_node, new_index=self.index_graph.get_index(node1)
)
self._docstore.add_documents([node1], allow_update=False)
self.index_graph.insert_under_parent(
node2, parent_node, new_index=self.index_graph.get_index(node2)
)
self._docstore.add_documents([node2], allow_update=False)
def _insert_node(
self, node: BaseNode, parent_node: Optional[BaseNode] = None
) -> None:
"""Insert node."""
cur_graph_node_ids = self.index_graph.get_children(parent_node)
cur_graph_nodes = self._docstore.get_node_dict(cur_graph_node_ids)
cur_graph_node_list = get_sorted_node_list(cur_graph_nodes)
# if cur_graph_nodes is empty (start with empty graph), then insert under
# parent (insert new root node)
if len(cur_graph_nodes) == 0:
self._insert_under_parent_and_consolidate(node, parent_node)
# check if leaf nodes, then just insert under parent
elif len(self.index_graph.get_children(cur_graph_node_list[0])) == 0:
self._insert_under_parent_and_consolidate(node, parent_node)
# else try to find the right summary node to insert under
else:
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
prompt=self.insert_prompt,
num_chunks=len(cur_graph_node_list),
)
numbered_text = get_numbered_text_from_nodes(
cur_graph_node_list, text_splitter=text_splitter
)
response = self._llm.predict(
self.insert_prompt,
new_chunk_text=node.get_content(metadata_mode=MetadataMode.LLM),
num_chunks=len(cur_graph_node_list),
context_list=numbered_text,
)
numbers = extract_numbers_given_response(response)
if numbers is None or len(numbers) == 0:
# NOTE: if we can't extract a number, then we just insert under parent
self._insert_under_parent_and_consolidate(node, parent_node)
elif int(numbers[0]) > len(cur_graph_node_list):
# NOTE: if number is out of range, then we just insert under parent
self._insert_under_parent_and_consolidate(node, parent_node)
else:
selected_node = cur_graph_node_list[int(numbers[0]) - 1]
self._insert_node(node, selected_node)
# now we need to update summary for parent node, since we
# need to bubble updated summaries up the tree
if parent_node is not None:
# refetch children
cur_graph_node_ids = self.index_graph.get_children(parent_node)
cur_graph_nodes = self._docstore.get_node_dict(cur_graph_node_ids)
cur_graph_node_list = get_sorted_node_list(cur_graph_nodes)
truncated_chunks = self._prompt_helper.truncate(
prompt=self.summary_prompt,
text_chunks=[
node.get_content(metadata_mode=MetadataMode.LLM)
for node in cur_graph_node_list
],
)
text_chunk = "\n".join(truncated_chunks)
new_summary = self._llm.predict(self.summary_prompt, context_str=text_chunk)
parent_node.set_content(new_summary)
def insert(self, nodes: Sequence[BaseNode]) -> None:
"""Insert into index_graph."""
for node in nodes:
self._insert_node(node)
| [
"llama_index.core.indices.tree.utils.get_numbered_text_from_nodes",
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.storage.docstore.registry.get_default_docstore",
"llama_index.core.indices.utils.extract_numbers_given_response",
"llama_index.core.schema.TextNode",
"llama_index.core.indices.utils.get_sorted_node_list",
"llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata"
] | [((5228, 5265), 'llama_index.core.indices.utils.get_sorted_node_list', 'get_sorted_node_list', (['cur_graph_nodes'], {}), '(cur_graph_nodes)\n', (5248, 5265), False, 'from llama_index.core.indices.utils import extract_numbers_given_response, get_sorted_node_list\n'), ((1733, 1788), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (1761, 1788), False, 'from llama_index.core.settings import Settings, llm_from_settings_or_context\n'), ((1846, 1896), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', (['self._llm.metadata'], {}), '(self._llm.metadata)\n', (1876, 1896), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((1957, 1979), 'llama_index.core.storage.docstore.registry.get_default_docstore', 'get_default_docstore', ([], {}), '()\n', (1977, 1979), False, 'from llama_index.core.storage.docstore.registry import get_default_docstore\n'), ((2786, 2823), 'llama_index.core.indices.utils.get_sorted_node_list', 'get_sorted_node_list', (['cur_graph_nodes'], {}), '(cur_graph_nodes)\n', (2806, 2823), False, 'from llama_index.core.indices.utils import extract_numbers_given_response, get_sorted_node_list\n'), ((3577, 3600), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'summary1'}), '(text=summary1)\n', (3585, 3600), False, 'from llama_index.core.schema import BaseNode, MetadataMode, TextNode\n'), ((4083, 4106), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'summary2'}), '(text=summary2)\n', (4091, 4106), False, 'from llama_index.core.schema import BaseNode, MetadataMode, TextNode\n'), ((7414, 7451), 'llama_index.core.indices.utils.get_sorted_node_list', 'get_sorted_node_list', (['cur_graph_nodes'], {}), '(cur_graph_nodes)\n', (7434, 7451), False, 'from llama_index.core.indices.utils import extract_numbers_given_response, get_sorted_node_list\n'), ((6009, 6087), 'llama_index.core.indices.tree.utils.get_numbered_text_from_nodes', 'get_numbered_text_from_nodes', (['cur_graph_node_list'], {'text_splitter': 'text_splitter'}), '(cur_graph_node_list, text_splitter=text_splitter)\n', (6037, 6087), False, 'from llama_index.core.indices.tree.utils import get_numbered_text_from_nodes\n'), ((6410, 6450), 'llama_index.core.indices.utils.extract_numbers_given_response', 'extract_numbers_given_response', (['response'], {}), '(response)\n', (6440, 6450), False, 'from llama_index.core.indices.utils import extract_numbers_given_response, get_sorted_node_list\n')] |
"""JSON node parser."""
import json
from typing import Any, Dict, Generator, List, Optional, Sequence
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.utils import get_tqdm_iterable
class JSONNodeParser(NodeParser):
"""JSON node parser.
Splits a document into Nodes using custom JSON splitting logic.
Args:
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
@classmethod
def from_defaults(
cls,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
) -> "JSONNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "JSONNodeParser"
def _parse_nodes(
self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any
) -> List[BaseNode]:
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = self.get_nodes_from_node(node)
all_nodes.extend(nodes)
return all_nodes
def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]:
"""Get nodes from document."""
text = node.get_content(metadata_mode=MetadataMode.NONE)
try:
data = json.loads(text)
except json.JSONDecodeError:
# Handle invalid JSON input here
return []
json_nodes = []
if isinstance(data, dict):
lines = [*self._depth_first_yield(data, 0, [])]
json_nodes.extend(
build_nodes_from_splits(["\n".join(lines)], node, id_func=self.id_func)
)
elif isinstance(data, list):
for json_object in data:
lines = [*self._depth_first_yield(json_object, 0, [])]
json_nodes.extend(
build_nodes_from_splits(
["\n".join(lines)], node, id_func=self.id_func
)
)
else:
raise ValueError("JSON is invalid")
return json_nodes
def _depth_first_yield(
self, json_data: Dict, levels_back: int, path: List[str]
) -> Generator[str, None, None]:
"""Do depth first yield of all of the leaf nodes of a JSON.
Combines keys in the JSON tree using spaces.
If levels_back is set to 0, prints all levels.
"""
if isinstance(json_data, dict):
for key, value in json_data.items():
new_path = path[:]
new_path.append(key)
yield from self._depth_first_yield(value, levels_back, new_path)
elif isinstance(json_data, list):
for _, value in enumerate(json_data):
yield from self._depth_first_yield(value, levels_back, path)
else:
new_path = path[-levels_back:]
new_path.append(str(json_data))
yield " ".join(new_path)
| [
"llama_index.core.callbacks.base.CallbackManager",
"llama_index.core.utils.get_tqdm_iterable"
] | [((1510, 1566), 'llama_index.core.utils.get_tqdm_iterable', 'get_tqdm_iterable', (['nodes', 'show_progress', '"""Parsing nodes"""'], {}), "(nodes, show_progress, 'Parsing nodes')\n", (1527, 1566), False, 'from llama_index.core.utils import get_tqdm_iterable\n'), ((995, 1014), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (1010, 1014), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((1928, 1944), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (1938, 1944), False, 'import json\n')] |
import asyncio
import os
import tempfile
import traceback
from datetime import date, datetime
from functools import partial
from pathlib import Path
import aiohttp
import discord
import openai
import tiktoken
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
from llama_index import (
BeautifulSoupWebReader,
Document,
GPTVectorStoreIndex,
LLMPredictor,
MockEmbedding,
OpenAIEmbedding,
QuestionAnswerPrompt,
ResponseSynthesizer,
ServiceContext,
SimpleDirectoryReader,
)
from llama_index.callbacks import CallbackManager, TokenCountingHandler
from llama_index.composability import QASummaryQueryEngineBuilder
from llama_index.indices.query.query_transform import StepDecomposeQueryTransform
from llama_index.optimization import SentenceEmbeddingOptimizer
from llama_index.prompts.chat_prompts import CHAT_REFINE_PROMPT
from llama_index.query_engine import MultiStepQueryEngine, RetrieverQueryEngine
from llama_index.readers.web import DEFAULT_WEBSITE_EXTRACTOR
from llama_index.retrievers import VectorIndexRetriever
from services.environment_service import EnvService
from models.openai_model import Models
MAX_SEARCH_PRICE = EnvService.get_max_search_price()
class Search:
def __init__(self, gpt_model, usage_service):
self.model = gpt_model
self.usage_service = usage_service
self.google_search_api_key = EnvService.get_google_search_api_key()
self.google_search_engine_id = EnvService.get_google_search_engine_id()
self.loop = asyncio.get_running_loop()
self.qaprompt = QuestionAnswerPrompt(
"You are formulating the response to a search query given the search prompt and the context. Context information is below. The text '<|endofstatement|>' is used to separate chat entries and make it easier for you to understand the context\n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Never say '<|endofstatement|>'\n"
"Given the context information and not prior knowledge, "
"answer the question, say that you were unable to answer the question if there is not sufficient context to formulate a decisive answer. If the prior knowledge/context was sufficient, simply repeat it. The search query was: {query_str}\n"
)
self.openai_key = os.getenv("OPENAI_TOKEN")
self.EMBED_CUTOFF = 2000
def add_search_index(self, index, user_id, query):
# Create a folder called "indexes/{USER_ID}" if it doesn't exist already
Path(f"{EnvService.save_path()}/indexes/{user_id}_search").mkdir(
parents=True, exist_ok=True
)
# Save the index to file under the user id
file = f"{date.today().month}_{date.today().day}_{query[:20]}"
index.storage_context.persist(
persist_dir=EnvService.save_path()
/ "indexes"
/ f"{str(user_id)}_search"
/ f"{file}"
)
def build_search_started_embed(self):
embed = discord.Embed(
title="Searching the web...",
description="Refining google search query...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_refined_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n"
+ f"`{refined_query}`"
+ "\nRetrieving links from google...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_links_retrieved_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`"
"\nRetrieving webpages...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_determining_price_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`"
"\nPre-determining index price...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_webpages_retrieved_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`" "\nIndexing...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_indexed_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`"
"\nThinking about your question...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_final_embed(self, refined_query, price):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`"
"\nDone!\n||The total price was $" + price + "||",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def index_webpage(self, url) -> list[Document]:
documents = BeautifulSoupWebReader(
website_extractor=DEFAULT_WEBSITE_EXTRACTOR
).load_data(urls=[url])
return documents
async def index_pdf(self, url) -> list[Document]:
# Download the PDF at the url and save it to a tempfile
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
if response.status == 200:
data = await response.read()
f = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
f.write(data)
f.close()
else:
raise ValueError("Could not download PDF")
# Get the file path of this tempfile.NamedTemporaryFile
# Save this temp file to an actual file that we can put into something else to read it
documents = SimpleDirectoryReader(input_files=[f.name]).load_data()
for document in documents:
document.extra_info = {"URL": url}
# Delete the temporary file
return documents
async def get_links(self, query, search_scope=2):
"""Search the web for a query"""
async with aiohttp.ClientSession() as session:
async with session.get(
f"https://www.googleapis.com/customsearch/v1?key={self.google_search_api_key}&cx={self.google_search_engine_id}&q={query}"
) as response:
if response.status == 200:
data = await response.json()
# Return a list of the top 2 links
return (
[item["link"] for item in data["items"][:search_scope]],
[item["link"] for item in data["items"]],
)
else:
raise ValueError(
"Error while retrieving links, the response returned "
+ str(response.status)
+ " with the message "
+ str(await response.text())
)
async def try_edit(self, message, embed):
try:
await message.edit(embed=embed)
except Exception:
traceback.print_exc()
pass
async def try_delete(self, message):
try:
await message.delete()
except Exception:
traceback.print_exc()
pass
async def search(
self,
ctx: discord.ApplicationContext,
query,
user_api_key,
search_scope,
nodes,
deep,
response_mode,
model,
multistep=False,
redo=None,
):
DEFAULT_SEARCH_NODES = 1
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
# Initialize the search cost
price = 0
if ctx:
in_progress_message = (
await ctx.respond(embed=self.build_search_started_embed())
if not redo
else await ctx.channel.send(embed=self.build_search_started_embed())
)
try:
llm_predictor_presearch = OpenAI(
max_tokens=50,
temperature=0.4,
presence_penalty=0.65,
model_name="text-davinci-003",
)
# Refine a query to send to google custom search API
prompt = f"You are to be given a search query for google. Change the query such that putting it into the Google Custom Search API will return the most relevant websites to assist in answering the original query. If the original query is inferring knowledge about the current day, insert the current day into the refined prompt. If the original query is inferring knowledge about the current month, insert the current month and year into the refined prompt. If the original query is inferring knowledge about the current year, insert the current year into the refined prompt. Generally, if the original query is inferring knowledge about something that happened recently, insert the current month into the refined query. Avoid inserting a day, month, or year for queries that purely ask about facts and about things that don't have much time-relevance. The current date is {str(datetime.now().date())}. Do not insert the current date if not neccessary. Respond with only the refined query for the original query. Don’t use punctuation or quotation marks.\n\nExamples:\n---\nOriginal Query: ‘Who is Harald Baldr?’\nRefined Query: ‘Harald Baldr biography’\n---\nOriginal Query: ‘What happened today with the Ohio train derailment?’\nRefined Query: ‘Ohio train derailment details {str(datetime.now().date())}’\n---\nOriginal Query: ‘Is copper in drinking water bad for you?’\nRefined Query: ‘copper in drinking water adverse effects’\n---\nOriginal Query: What's the current time in Mississauga?\nRefined Query: current time Mississauga\nNow, refine the user input query.\nOriginal Query: {query}\nRefined Query:"
query_refined = await llm_predictor_presearch.agenerate(
prompts=[prompt],
)
query_refined_text = query_refined.generations[0][0].text
await self.usage_service.update_usage(
query_refined.llm_output.get("token_usage").get("total_tokens"),
"davinci",
)
price += await self.usage_service.get_price(
query_refined.llm_output.get("token_usage").get("total_tokens"),
"davinci",
)
except Exception as e:
traceback.print_exc()
query_refined_text = query
if ctx:
await self.try_edit(
in_progress_message, self.build_search_refined_embed(query_refined_text)
)
# Get the links for the query
links, all_links = await self.get_links(
query_refined_text, search_scope=search_scope
)
if ctx:
await self.try_edit(
in_progress_message,
self.build_search_links_retrieved_embed(query_refined_text),
)
if all_links is None:
raise ValueError("The Google Search API returned an error.")
# For each link, crawl the page and get all the text that's not HTML garbage.
# Concatenate all the text for a given website into one string and save it into an array:
documents = []
for link in links:
# First, attempt a connection with a timeout of 3 seconds to the link, if the timeout occurs, don't
# continue to the document loading.
pdf = False
try:
async with aiohttp.ClientSession() as session:
async with session.get(link, timeout=1) as response:
# Add another entry to links from all_links if the link is not already in it to compensate for the failed request
if response.status not in [200, 203, 202, 204]:
for link2 in all_links:
if link2 not in links:
links.append(link2)
break
continue
# Follow redirects
elif response.status in [301, 302, 303, 307, 308]:
try:
links.append(response.url)
continue
except:
continue
else:
# Detect if the link is a PDF, if it is, we load it differently
if response.headers["Content-Type"] == "application/pdf":
pdf = True
except:
try:
# Try to add a link from all_links, this is kind of messy.
for link2 in all_links:
if link2 not in links:
links.append(link2)
break
except:
pass
continue
try:
if not pdf:
document = await self.loop.run_in_executor(
None, partial(self.index_webpage, link)
)
else:
document = await self.index_pdf(link)
[documents.append(doc) for doc in document]
except Exception as e:
traceback.print_exc()
if ctx:
await self.try_edit(
in_progress_message,
self.build_search_webpages_retrieved_embed(query_refined_text),
)
embedding_model = OpenAIEmbedding()
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name=model))
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model(model).encode, verbose=False
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor,
embed_model=embedding_model,
callback_manager=callback_manager,
)
# Check price
token_counter_mock = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model(model).encode, verbose=False
)
callback_manager_mock = CallbackManager([token_counter_mock])
embed_model_mock = MockEmbedding(embed_dim=1536)
service_context_mock = ServiceContext.from_defaults(
embed_model=embed_model_mock, callback_manager=callback_manager_mock
)
self.loop.run_in_executor(
None,
partial(
GPTVectorStoreIndex.from_documents,
documents,
service_context=service_context_mock,
),
)
total_usage_price = await self.usage_service.get_price(
token_counter_mock.total_embedding_token_count, "embedding"
)
if total_usage_price > 1.00:
raise ValueError(
"Doing this search would be prohibitively expensive. Please try a narrower search scope."
)
if not deep:
index = await self.loop.run_in_executor(
None,
partial(
GPTVectorStoreIndex.from_documents,
documents,
service_context=service_context,
use_async=True,
),
)
# save the index to disk if not a redo
if not redo:
self.add_search_index(
index,
ctx.user.id
if isinstance(ctx, discord.ApplicationContext)
else ctx.author.id,
query,
)
else:
if ctx:
await self.try_edit(
in_progress_message,
self.build_search_determining_price_embed(query_refined_text),
)
graph_builder = QASummaryQueryEngineBuilder(service_context=service_context)
index = await self.loop.run_in_executor(
None,
partial(
graph_builder.build_from_documents,
documents,
),
)
if ctx:
await self.try_edit(
in_progress_message, self.build_search_indexed_embed(query_refined_text)
)
########################################
if not deep:
step_decompose_transform = StepDecomposeQueryTransform(
service_context.llm_predictor
)
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=nodes or DEFAULT_SEARCH_NODES,
)
response_synthesizer = ResponseSynthesizer.from_args(
response_mode=response_mode,
use_async=True,
refine_template=CHAT_REFINE_PROMPT,
text_qa_template=self.qaprompt,
optimizer=SentenceEmbeddingOptimizer(threshold_cutoff=0.7),
service_context=service_context,
)
query_engine = RetrieverQueryEngine(
retriever=retriever, response_synthesizer=response_synthesizer
)
multistep_query_engine = MultiStepQueryEngine(
query_engine=query_engine,
query_transform=step_decompose_transform,
index_summary="Provides information about everything you need to know about this topic, use this to answer the question.",
)
if multistep:
response = await self.loop.run_in_executor(
None,
partial(multistep_query_engine.query, query),
)
else:
response = await self.loop.run_in_executor(
None,
partial(query_engine.query, query),
)
else:
query_configs = [
{
"index_struct_type": "simple_dict",
"query_mode": "default",
"query_kwargs": {"similarity_top_k": 1},
},
{
"index_struct_type": "list",
"query_mode": "default",
"query_kwargs": {
"response_mode": "tree_summarize",
"use_async": True,
"verbose": True,
},
},
{
"index_struct_type": "tree",
"query_mode": "default",
"query_kwargs": {
"verbose": True,
"use_async": True,
"child_branch_factor": 2,
},
},
]
response = await self.loop.run_in_executor(
None,
partial(
index.query,
query,
),
)
await self.usage_service.update_usage(
token_counter.total_llm_token_count,
await self.usage_service.get_cost_name(model),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
price += await self.usage_service.get_price(
token_counter.total_llm_token_count,
await self.usage_service.get_cost_name(model),
) + await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
if ctx:
await self.try_edit(
in_progress_message,
self.build_search_final_embed(query_refined_text, str(round(price, 6))),
)
return response, query_refined_text
| [
"llama_index.indices.query.query_transform.StepDecomposeQueryTransform",
"llama_index.OpenAIEmbedding",
"llama_index.composability.QASummaryQueryEngineBuilder",
"llama_index.MockEmbedding",
"llama_index.QuestionAnswerPrompt",
"llama_index.ServiceContext.from_defaults",
"llama_index.query_engine.RetrieverQueryEngine",
"llama_index.optimization.SentenceEmbeddingOptimizer",
"llama_index.BeautifulSoupWebReader",
"llama_index.SimpleDirectoryReader",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.callbacks.CallbackManager",
"llama_index.query_engine.MultiStepQueryEngine"
] | [((1193, 1226), 'services.environment_service.EnvService.get_max_search_price', 'EnvService.get_max_search_price', ([], {}), '()\n', (1224, 1226), False, 'from services.environment_service import EnvService\n'), ((1404, 1442), 'services.environment_service.EnvService.get_google_search_api_key', 'EnvService.get_google_search_api_key', ([], {}), '()\n', (1440, 1442), False, 'from services.environment_service import EnvService\n'), ((1482, 1522), 'services.environment_service.EnvService.get_google_search_engine_id', 'EnvService.get_google_search_engine_id', ([], {}), '()\n', (1520, 1522), False, 'from services.environment_service import EnvService\n'), ((1543, 1569), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (1567, 1569), False, 'import asyncio\n'), ((1594, 2249), 'llama_index.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['"""You are formulating the response to a search query given the search prompt and the context. Context information is below. The text \'<|endofstatement|>\' is used to separate chat entries and make it easier for you to understand the context\n---------------------\n{context_str}\n---------------------\nNever say \'<|endofstatement|>\'\nGiven the context information and not prior knowledge, answer the question, say that you were unable to answer the question if there is not sufficient context to formulate a decisive answer. If the prior knowledge/context was sufficient, simply repeat it. The search query was: {query_str}\n"""'], {}), '(\n """You are formulating the response to a search query given the search prompt and the context. Context information is below. The text \'<|endofstatement|>\' is used to separate chat entries and make it easier for you to understand the context\n---------------------\n{context_str}\n---------------------\nNever say \'<|endofstatement|>\'\nGiven the context information and not prior knowledge, answer the question, say that you were unable to answer the question if there is not sufficient context to formulate a decisive answer. If the prior knowledge/context was sufficient, simply repeat it. The search query was: {query_str}\n"""\n )\n', (1614, 2249), False, 'from llama_index import BeautifulSoupWebReader, Document, GPTVectorStoreIndex, LLMPredictor, MockEmbedding, OpenAIEmbedding, QuestionAnswerPrompt, ResponseSynthesizer, ServiceContext, SimpleDirectoryReader\n'), ((2380, 2405), 'os.getenv', 'os.getenv', (['"""OPENAI_TOKEN"""'], {}), "('OPENAI_TOKEN')\n", (2389, 2405), False, 'import os\n'), ((14709, 14726), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (14724, 14726), False, 'from llama_index import BeautifulSoupWebReader, Document, GPTVectorStoreIndex, LLMPredictor, MockEmbedding, OpenAIEmbedding, QuestionAnswerPrompt, ResponseSynthesizer, ServiceContext, SimpleDirectoryReader\n'), ((14978, 15010), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (14993, 15010), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler\n'), ((15038, 15164), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embedding_model', 'callback_manager': 'callback_manager'}), '(llm_predictor=llm_predictor, embed_model=\n embedding_model, callback_manager=callback_manager)\n', (15066, 15164), False, 'from llama_index import BeautifulSoupWebReader, Document, GPTVectorStoreIndex, LLMPredictor, MockEmbedding, OpenAIEmbedding, QuestionAnswerPrompt, ResponseSynthesizer, ServiceContext, SimpleDirectoryReader\n'), ((15402, 15439), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[token_counter_mock]'], {}), '([token_counter_mock])\n', (15417, 15439), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler\n'), ((15467, 15496), 'llama_index.MockEmbedding', 'MockEmbedding', ([], {'embed_dim': '(1536)'}), '(embed_dim=1536)\n', (15480, 15496), False, 'from llama_index import BeautifulSoupWebReader, Document, GPTVectorStoreIndex, LLMPredictor, MockEmbedding, OpenAIEmbedding, QuestionAnswerPrompt, ResponseSynthesizer, ServiceContext, SimpleDirectoryReader\n'), ((15528, 15631), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model_mock', 'callback_manager': 'callback_manager_mock'}), '(embed_model=embed_model_mock, callback_manager\n =callback_manager_mock)\n', (15556, 15631), False, 'from llama_index import BeautifulSoupWebReader, Document, GPTVectorStoreIndex, LLMPredictor, MockEmbedding, OpenAIEmbedding, QuestionAnswerPrompt, ResponseSynthesizer, ServiceContext, SimpleDirectoryReader\n'), ((6017, 6040), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (6038, 6040), False, 'import aiohttp\n'), ((6922, 6945), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (6943, 6945), False, 'import aiohttp\n'), ((9024, 9121), 'langchain.OpenAI', 'OpenAI', ([], {'max_tokens': '(50)', 'temperature': '(0.4)', 'presence_penalty': '(0.65)', 'model_name': '"""text-davinci-003"""'}), "(max_tokens=50, temperature=0.4, presence_penalty=0.65, model_name=\n 'text-davinci-003')\n", (9030, 9121), False, 'from langchain import OpenAI\n'), ((15714, 15811), 'functools.partial', 'partial', (['GPTVectorStoreIndex.from_documents', 'documents'], {'service_context': 'service_context_mock'}), '(GPTVectorStoreIndex.from_documents, documents, service_context=\n service_context_mock)\n', (15721, 15811), False, 'from functools import partial\n'), ((17114, 17174), 'llama_index.composability.QASummaryQueryEngineBuilder', 'QASummaryQueryEngineBuilder', ([], {'service_context': 'service_context'}), '(service_context=service_context)\n', (17141, 17174), False, 'from llama_index.composability import QASummaryQueryEngineBuilder\n'), ((17660, 17718), 'llama_index.indices.query.query_transform.StepDecomposeQueryTransform', 'StepDecomposeQueryTransform', (['service_context.llm_predictor'], {}), '(service_context.llm_predictor)\n', (17687, 17718), False, 'from llama_index.indices.query.query_transform import StepDecomposeQueryTransform\n'), ((17774, 17859), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(nodes or DEFAULT_SEARCH_NODES)'}), '(index=index, similarity_top_k=nodes or\n DEFAULT_SEARCH_NODES)\n', (17794, 17859), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((18314, 18403), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer)\n', (18334, 18403), False, 'from llama_index.query_engine import MultiStepQueryEngine, RetrieverQueryEngine\n'), ((18466, 18693), 'llama_index.query_engine.MultiStepQueryEngine', 'MultiStepQueryEngine', ([], {'query_engine': 'query_engine', 'query_transform': 'step_decompose_transform', 'index_summary': '"""Provides information about everything you need to know about this topic, use this to answer the question."""'}), "(query_engine=query_engine, query_transform=\n step_decompose_transform, index_summary=\n 'Provides information about everything you need to know about this topic, use this to answer the question.'\n )\n", (18486, 18693), False, 'from llama_index.query_engine import MultiStepQueryEngine, RetrieverQueryEngine\n'), ((3199, 3222), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (3220, 3222), False, 'import discord\n'), ((3600, 3623), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (3621, 3623), False, 'import discord\n'), ((3986, 4009), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (4007, 4009), False, 'import discord\n'), ((4383, 4406), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (4404, 4406), False, 'import discord\n'), ((4750, 4773), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (4771, 4773), False, 'import discord\n'), ((5138, 5161), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (5159, 5161), False, 'import discord\n'), ((5545, 5568), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (5566, 5568), False, 'import discord\n'), ((5742, 5809), 'llama_index.BeautifulSoupWebReader', 'BeautifulSoupWebReader', ([], {'website_extractor': 'DEFAULT_WEBSITE_EXTRACTOR'}), '(website_extractor=DEFAULT_WEBSITE_EXTRACTOR)\n', (5764, 5809), False, 'from llama_index import BeautifulSoupWebReader, Document, GPTVectorStoreIndex, LLMPredictor, MockEmbedding, OpenAIEmbedding, QuestionAnswerPrompt, ResponseSynthesizer, ServiceContext, SimpleDirectoryReader\n'), ((6607, 6650), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[f.name]'}), '(input_files=[f.name])\n', (6628, 6650), False, 'from llama_index import BeautifulSoupWebReader, Document, GPTVectorStoreIndex, LLMPredictor, MockEmbedding, OpenAIEmbedding, QuestionAnswerPrompt, ResponseSynthesizer, ServiceContext, SimpleDirectoryReader\n'), ((7955, 7976), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7974, 7976), False, 'import traceback\n'), ((8122, 8143), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8141, 8143), False, 'import traceback\n'), ((11468, 11489), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (11487, 11489), False, 'import traceback\n'), ((14769, 14812), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': 'model'}), '(temperature=0, model_name=model)\n', (14779, 14812), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2769, 2781), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2779, 2781), False, 'from datetime import date, datetime\n'), ((2790, 2802), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2800, 2802), False, 'from datetime import date, datetime\n'), ((6222, 6278), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".pdf"""', 'delete': '(False)'}), "(suffix='.pdf', delete=False)\n", (6249, 6278), False, 'import tempfile\n'), ((12583, 12606), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (12604, 12606), False, 'import aiohttp\n'), ((14479, 14500), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (14498, 14500), False, 'import traceback\n'), ((14883, 14917), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['model'], {}), '(model)\n', (14910, 14917), False, 'import tiktoken\n'), ((15303, 15337), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['model'], {}), '(model)\n', (15330, 15337), False, 'import tiktoken\n'), ((16327, 16435), 'functools.partial', 'partial', (['GPTVectorStoreIndex.from_documents', 'documents'], {'service_context': 'service_context', 'use_async': '(True)'}), '(GPTVectorStoreIndex.from_documents, documents, service_context=\n service_context, use_async=True)\n', (16334, 16435), False, 'from functools import partial\n'), ((17267, 17321), 'functools.partial', 'partial', (['graph_builder.build_from_documents', 'documents'], {}), '(graph_builder.build_from_documents, documents)\n', (17274, 17321), False, 'from functools import partial\n'), ((18173, 18221), 'llama_index.optimization.SentenceEmbeddingOptimizer', 'SentenceEmbeddingOptimizer', ([], {'threshold_cutoff': '(0.7)'}), '(threshold_cutoff=0.7)\n', (18199, 18221), False, 'from llama_index.optimization import SentenceEmbeddingOptimizer\n'), ((20128, 20155), 'functools.partial', 'partial', (['index.query', 'query'], {}), '(index.query, query)\n', (20135, 20155), False, 'from functools import partial\n'), ((18874, 18918), 'functools.partial', 'partial', (['multistep_query_engine.query', 'query'], {}), '(multistep_query_engine.query, query)\n', (18881, 18918), False, 'from functools import partial\n'), ((19062, 19096), 'functools.partial', 'partial', (['query_engine.query', 'query'], {}), '(query_engine.query, query)\n', (19069, 19096), False, 'from functools import partial\n'), ((2592, 2614), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (2612, 2614), False, 'from services.environment_service import EnvService\n'), ((2886, 2908), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (2906, 2908), False, 'from services.environment_service import EnvService\n'), ((14232, 14265), 'functools.partial', 'partial', (['self.index_webpage', 'link'], {}), '(self.index_webpage, link)\n', (14239, 14265), False, 'from functools import partial\n'), ((10151, 10165), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10163, 10165), False, 'from datetime import date, datetime\n'), ((10571, 10585), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10583, 10585), False, 'from datetime import date, datetime\n')] |
import asyncio
import json
import os
import tempfile
import time
from functools import lru_cache
from logging import getLogger
from pathlib import Path
from fastapi import APIRouter, Request, status
from fastapi.encoders import jsonable_encoder
from fastapi.responses import HTMLResponse
from typing import List, Dict, Any
from pydantic import Field, validator
# This is here to satisfy runtime import needs
# that pyinstaller appears to miss
from llama_index.node_parser import SentenceSplitter
from llama_index.schema import TextNode, NodeRelationship, RelatedNodeInfo, MetadataMode, NodeWithScore
from llama_index.callbacks import CallbackManager, LlamaDebugHandler, OpenInferenceCallbackHandler
from llama_index.embeddings import OpenAIEmbedding, OllamaEmbedding
from llama_index.indices.query.query_transform import HyDEQueryTransform
from llama_index.query_pipeline import QueryPipeline
from llama_index.llms import OpenAI, Ollama
from llama_index.llms.base import BaseLLM
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index import LLMPredictor, PromptTemplate, VectorStoreIndex, Document, StorageContext, ServiceContext, download_loader
from llama_index.callbacks import CallbackManager, LlamaDebugHandler
from llama_index.embeddings import OpenAIEmbedding
from llama_index.llms import OpenAI
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index import VectorStoreIndex, Document, StorageContext, ServiceContext, download_loader
from llama_index.callbacks import CallbackManager, LlamaDebugHandler
from llama_index.indices.query.query_transform.base import DecomposeQueryTransform
from llama_index import ServiceContext
from llama_index.postprocessor import CohereRerank
from llama_index.response_synthesizers import TreeSummarize
from llama_index.postprocessor import PrevNextNodePostprocessor, LLMRerank
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.query_pipeline import CustomQueryComponent, InputKeys, OutputKeys
from llama_index.postprocessor.types import BaseNodePostprocessor
from llama_index.vector_stores.types import BasePydanticVectorStore
from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever, VectorIndexRetriever
from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo
from snowflake import SnowflakeGenerator
from service.dependencies import (
TANA_NODE,
TANA_TEXT,
LlamaindexAsk,
TanaNodeMetadata,
)
from service.endpoints.chroma import get_collection, get_tana_nodes_by_id
from service.endpoints.topics import TanaDocument, extract_topics, is_reference_content, tana_node_ids_from_text
from service.llamaindex import DecomposeQueryWithNodeContext, WidenNodeWindowPostProcessor, create_index, get_index
from service.tana_types import TanaDump
logger = getLogger()
snowflakes = SnowflakeGenerator(42)
router = APIRouter()
minutes = 1000 * 60
# TODO: Add header support throughout so we can pass Tana API key and OpenAPI Key as headers
# NOTE: we already have this in the main.py middleware wrapper, but it would be better
# to do it here for OpenAPI spec purposes.
# x_tana_api_token: Annotated[str | None, Header()] = None
# x_openai_api_key: Annotated[str | None, Header()] = None
# enrich our retriever with knowledge of our metadata
def get_auto_retriever(index:VectorStoreIndex):
vector_store_info = VectorStoreInfo(
content_info="My Tana Notebook. Comprises many Tana nodes with text and metadata fields.",
metadata_info=[
MetadataInfo(
name="category",
type="str",
description=(
"One of TANA_NODE or TANA_TEXT\n"
"TANA_NODE means that this is a top-level topic in my Tana notebook\n"
"TANA_TEXT means this is detailed information as part of a topic, identfied by topic_id metadata.\n"
"Do NOT use category to query the index. Only use category to enrich your understanding of the result.\n"
"DO NOT reference category in your responses.\n"
),
),
MetadataInfo(
name="topic_id",
type="str",
description=(
"Identifies the Tana Notebook Node that this text is part of. Should be used as a reference to the notebook entry.\n"
"Only use topic_id to query the index when you want a single specific node by reference.\n"
"You can use topic_id when referencing a Tana Notebook Node in your responses.\n"
),
),
MetadataInfo(
name="tana_id",
type="str",
description=(
"The Tana Notebook Node for this piece of text. Should be used a reference to the notebook entry.\n"
"Only use topic_id to query the index when you want a single specific node by reference.\n"
"You can use tana_id when referencing a Tana Notebook Node in your responses.\n"
),
),
MetadataInfo(
name="supertag",
type="str",
description=(
"One or more optional GENERAL semantic ontology tags for this Tana Notebook Node.\n"
"Delimited by spaces (NOT a LIST. Do not use IN operator to test membership)\n"
"Example: \n"
"{ supertag: #task #topic #person #meeting }\n"
"Do NOT use supertags to query the index. Only use supertags to enrich your understanding of the result.\n"
),
),
],
)
# THIS doesn't work at all well with GPT 3
# and only works sometimes with GPT4. Problem is that it becomes fixated on the
# use of metadata to filter results, overly constraining relevance.
# retriever = VectorIndexAutoRetriever(
# index,
# vector_store_info=vector_store_info,
# similarity_top_k=10
# )
retriever = VectorIndexRetriever(index=index, similarity_top_k=10)
return retriever
@router.post("/llamaindex/ask", response_class=HTMLResponse, tags=["research"])
def llamaindex_ask(req: LlamaindexAsk, model:str):
'''Ask a question of the Llamaindex and return the top results
'''
(index, service_context, vector_store, llm) = get_index(model=model)
query_engine=index.as_query_engine(similarity_top_k=20, stream=False)
logger.info(f'Querying LLamaindex with {req.query}')
response = query_engine.query(req.query)
return str(response)
summary_tmpl = PromptTemplate(
"You are an expert Q&A system that is trusted around the world.\n"
"TASK\n"
"Summarize the following CONTEXT in order to best answer the QUERY.\n"
"Answer the QUERY using the provided CONTEXT information, and not prior knowledge.\n"
"Some rules to follow:\n"
"1. Avoid statements like 'Based on the context, ...' or 'The context information ...' or anything along those lines.\n"
"2. The CONTEXT contais references to many Tana Notebook Nodes. Nodes have both metadata and text content\n"
"3. Whenever your summary needs to reference Tana Notebook Nodes from the CONTEXT, use proper Tana node reference format as follows:\n"
" the characters '[[' + '^' + tana_id metadata and then the characters ']]'.\n"
" E.g. to reference the Tana context node titled 'Recipe for making icecream' with tana_id: xghysd76 use this format:\n"
" [[^xghysd76]]\n"
"5. Try to avoid making many redundant references to the same Tana node in your summary. Use footnote style if you really need to do this.\n"
"\n"
"QUERY: {query_str}\n"
"-----\n"
"CONTEXT:\n"
"{context_str}\n"
"END_CONTEXT\n"
"-----\n"
)
#TODO: Move model out of POST body and into query params perhaps?
@router.post("/llamaindex/research", response_class=HTMLResponse, tags=["research"])
def llama_ask_custom_pipeline(req: LlamaindexAsk, model:str):
'''Research a question using Llamaindex and return the top results.'''
(index, service_context, storage_context, llm) = get_index(model, observe=True)
logger.info(f'Researching LLamaindex with {req.query}')
# first, build up a set of research questions
decompose_transform = DecomposeQueryWithNodeContext(llm=llm)
p1 = QueryPipeline(chain=[decompose_transform])
questions = p1.run(query=req.query)
retriever = get_auto_retriever(index)
# and preprocess the result nodes to make use of next/previous
prevnext = WidenNodeWindowPostProcessor(storage_context=storage_context, num_nodes=5, mode="both")
summarizer = TreeSummarize(summary_template=summary_tmpl, service_context=service_context)
# for each question, do a fetch against Chroma to find potentially relevant nodes
results = []
for question in questions:
if question == '':
continue
logger.info(f'Question: {question}')
# use our metadata aware auto-retriever to fetch from Chroma
q1 = QueryPipeline(chain=[retriever, prevnext])
nodes = q1.run(input=question)
# nodes = retriever.retrieve(question)
# logger.info(f'Nodes:\n{nodes}')
# clean up the redudant metadata (TANA_TEXT node metadata is less useful here)
new_nodes = []
if nodes:
for node in nodes:
new_node = node
if node.metadata['category'] == TANA_TEXT:
# copy the outer NodeWithScore and the inner TextNode objects
new_text_node = TextNode(**node.node.dict())
# wipe out the metadata
new_text_node.metadata = {}
new_node = NodeWithScore(node=new_text_node, score=node.score)
new_nodes.append(new_node)
research = '\n'.join([node.get_content(metadata_mode=MetadataMode.LLM) for node in new_nodes])
logger.info(f'Nodes:\n{research}')
# tailor the summarizer prompt
sum_result = summarizer.as_query_component().run_component(nodes=new_nodes, query_str=question)
summary = sum_result['output'].response
logger.info(f'Summary:\n{summary}')
result = {'question': question,
'answers': nodes,
'summary': summary}
results.append(result)
# now build up the context from the result nodes
context = []
for result in results:
question = result['question']
answer = result['answers']
summary = result['summary']
context.append(f'QUESTION: {question}\n')
#context.append('RESEARCH:\n')
# TODO: instead of dumping all nodes into the primary context
# we should prepare an answer to each question and then use that
# node:TextNode
# for node in answer:
# context.append(node.get_content(metadata_mode=MetadataMode.LLM)+'\n')
context.append('ANSWER:\n')
context.append(summary+'\n')
context.append('\n')
# now combine all that research
prompt_tmpl = PromptTemplate(
"You are an expert Q&A system that is trusted around the world.\n"
"Always answer the question using the provided context information, and not prior knowledge.\n"
"Some rules to follow:\n"
"1. Avoid statements like 'Based on the context, ...' or 'The context information ...' or anything along those lines.\n"
"2. You will be given CONTEXT information in the form of one or more related QUESTIONS and the ANSWERS to those questions.\n"
"3. For each ANSWER, there may be many Tana Notebook Nodes. Nodes have both metadata and text content\n"
"4. Whenever your response needs to reference Tana Notebook Nodes from the context, use proper Tana node reference format as follows:\n"
" the characters '[[' + '^' + tana_id metadata and then the characters ']]'.\n"
" E.g. to reference the Tana context node titled 'Recipe for making icecream' with tana_id: xghysd76 use this format:\n"
" [[^xghysd76]]\n"
"5. Try to avoid making many redundant references to the same Tana node in your response. Use footnote style if you really need to do this.\n"
"\n"
"QUERY: {query}\n"
"-----\n"
"CONTEXT:\n"
"{context}\n"
"END_CONTEXT\n"
"-----\n"
)
p2 = QueryPipeline(chain=[prompt_tmpl, llm])
response = p2.run(query=req.query, context='\n'.join(context))
return response.message.content
# attempt to paralleize non-async code
# see https://github.com/tiangolo/fastapi/discussions/6347
lock = asyncio.Lock()
| [
"llama_index.response_synthesizers.TreeSummarize",
"llama_index.vector_stores.types.MetadataInfo",
"llama_index.query_pipeline.QueryPipeline",
"llama_index.PromptTemplate",
"llama_index.indices.vector_store.retrievers.VectorIndexRetriever",
"llama_index.schema.NodeWithScore"
] | [((2834, 2845), 'logging.getLogger', 'getLogger', ([], {}), '()\n', (2843, 2845), False, 'from logging import getLogger\n'), ((2859, 2881), 'snowflake.SnowflakeGenerator', 'SnowflakeGenerator', (['(42)'], {}), '(42)\n', (2877, 2881), False, 'from snowflake import SnowflakeGenerator\n'), ((2892, 2903), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (2901, 2903), False, 'from fastapi import APIRouter, Request, status\n'), ((6478, 7521), 'llama_index.PromptTemplate', 'PromptTemplate', (['"""You are an expert Q&A system that is trusted around the world.\nTASK\nSummarize the following CONTEXT in order to best answer the QUERY.\nAnswer the QUERY using the provided CONTEXT information, and not prior knowledge.\nSome rules to follow:\n1. Avoid statements like \'Based on the context, ...\' or \'The context information ...\' or anything along those lines.\n2. The CONTEXT contais references to many Tana Notebook Nodes. Nodes have both metadata and text content\n3. Whenever your summary needs to reference Tana Notebook Nodes from the CONTEXT, use proper Tana node reference format as follows:\n the characters \'[[\' + \'^\' + tana_id metadata and then the characters \']]\'.\n E.g. to reference the Tana context node titled \'Recipe for making icecream\' with tana_id: xghysd76 use this format:\n [[^xghysd76]]\n5. Try to avoid making many redundant references to the same Tana node in your summary. Use footnote style if you really need to do this.\n\nQUERY: {query_str}\n-----\nCONTEXT:\n{context_str}\nEND_CONTEXT\n-----\n"""'], {}), '(\n """You are an expert Q&A system that is trusted around the world.\nTASK\nSummarize the following CONTEXT in order to best answer the QUERY.\nAnswer the QUERY using the provided CONTEXT information, and not prior knowledge.\nSome rules to follow:\n1. Avoid statements like \'Based on the context, ...\' or \'The context information ...\' or anything along those lines.\n2. The CONTEXT contais references to many Tana Notebook Nodes. Nodes have both metadata and text content\n3. Whenever your summary needs to reference Tana Notebook Nodes from the CONTEXT, use proper Tana node reference format as follows:\n the characters \'[[\' + \'^\' + tana_id metadata and then the characters \']]\'.\n E.g. to reference the Tana context node titled \'Recipe for making icecream\' with tana_id: xghysd76 use this format:\n [[^xghysd76]]\n5. Try to avoid making many redundant references to the same Tana node in your summary. Use footnote style if you really need to do this.\n\nQUERY: {query_str}\n-----\nCONTEXT:\n{context_str}\nEND_CONTEXT\n-----\n"""\n )\n', (6492, 7521), False, 'from llama_index import LLMPredictor, PromptTemplate, VectorStoreIndex, Document, StorageContext, ServiceContext, download_loader\n'), ((12205, 12219), 'asyncio.Lock', 'asyncio.Lock', ([], {}), '()\n', (12217, 12219), False, 'import asyncio\n'), ((5916, 5970), 'llama_index.indices.vector_store.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(10)'}), '(index=index, similarity_top_k=10)\n', (5936, 5970), False, 'from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever, VectorIndexRetriever\n'), ((6243, 6265), 'service.llamaindex.get_index', 'get_index', ([], {'model': 'model'}), '(model=model)\n', (6252, 6265), False, 'from service.llamaindex import DecomposeQueryWithNodeContext, WidenNodeWindowPostProcessor, create_index, get_index\n'), ((7999, 8029), 'service.llamaindex.get_index', 'get_index', (['model'], {'observe': '(True)'}), '(model, observe=True)\n', (8008, 8029), False, 'from service.llamaindex import DecomposeQueryWithNodeContext, WidenNodeWindowPostProcessor, create_index, get_index\n'), ((8162, 8200), 'service.llamaindex.DecomposeQueryWithNodeContext', 'DecomposeQueryWithNodeContext', ([], {'llm': 'llm'}), '(llm=llm)\n', (8191, 8200), False, 'from service.llamaindex import DecomposeQueryWithNodeContext, WidenNodeWindowPostProcessor, create_index, get_index\n'), ((8208, 8250), 'llama_index.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[decompose_transform]'}), '(chain=[decompose_transform])\n', (8221, 8250), False, 'from llama_index.query_pipeline import QueryPipeline\n'), ((8412, 8503), 'service.llamaindex.WidenNodeWindowPostProcessor', 'WidenNodeWindowPostProcessor', ([], {'storage_context': 'storage_context', 'num_nodes': '(5)', 'mode': '"""both"""'}), "(storage_context=storage_context, num_nodes=5,\n mode='both')\n", (8440, 8503), False, 'from service.llamaindex import DecomposeQueryWithNodeContext, WidenNodeWindowPostProcessor, create_index, get_index\n'), ((8515, 8592), 'llama_index.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'summary_template': 'summary_tmpl', 'service_context': 'service_context'}), '(summary_template=summary_tmpl, service_context=service_context)\n', (8528, 8592), False, 'from llama_index.response_synthesizers import TreeSummarize\n'), ((10727, 11820), 'llama_index.PromptTemplate', 'PromptTemplate', (['"""You are an expert Q&A system that is trusted around the world.\nAlways answer the question using the provided context information, and not prior knowledge.\nSome rules to follow:\n1. Avoid statements like \'Based on the context, ...\' or \'The context information ...\' or anything along those lines.\n2. You will be given CONTEXT information in the form of one or more related QUESTIONS and the ANSWERS to those questions.\n3. For each ANSWER, there may be many Tana Notebook Nodes. Nodes have both metadata and text content\n4. Whenever your response needs to reference Tana Notebook Nodes from the context, use proper Tana node reference format as follows:\n the characters \'[[\' + \'^\' + tana_id metadata and then the characters \']]\'.\n E.g. to reference the Tana context node titled \'Recipe for making icecream\' with tana_id: xghysd76 use this format:\n [[^xghysd76]]\n5. Try to avoid making many redundant references to the same Tana node in your response. Use footnote style if you really need to do this.\n\nQUERY: {query}\n-----\nCONTEXT:\n{context}\nEND_CONTEXT\n-----\n"""'], {}), '(\n """You are an expert Q&A system that is trusted around the world.\nAlways answer the question using the provided context information, and not prior knowledge.\nSome rules to follow:\n1. Avoid statements like \'Based on the context, ...\' or \'The context information ...\' or anything along those lines.\n2. You will be given CONTEXT information in the form of one or more related QUESTIONS and the ANSWERS to those questions.\n3. For each ANSWER, there may be many Tana Notebook Nodes. Nodes have both metadata and text content\n4. Whenever your response needs to reference Tana Notebook Nodes from the context, use proper Tana node reference format as follows:\n the characters \'[[\' + \'^\' + tana_id metadata and then the characters \']]\'.\n E.g. to reference the Tana context node titled \'Recipe for making icecream\' with tana_id: xghysd76 use this format:\n [[^xghysd76]]\n5. Try to avoid making many redundant references to the same Tana node in your response. Use footnote style if you really need to do this.\n\nQUERY: {query}\n-----\nCONTEXT:\n{context}\nEND_CONTEXT\n-----\n"""\n )\n', (10741, 11820), False, 'from llama_index import LLMPredictor, PromptTemplate, VectorStoreIndex, Document, StorageContext, ServiceContext, download_loader\n'), ((11960, 11999), 'llama_index.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[prompt_tmpl, llm]'}), '(chain=[prompt_tmpl, llm])\n', (11973, 11999), False, 'from llama_index.query_pipeline import QueryPipeline\n'), ((8876, 8918), 'llama_index.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[retriever, prevnext]'}), '(chain=[retriever, prevnext])\n', (8889, 8918), False, 'from llama_index.query_pipeline import QueryPipeline\n'), ((3533, 3945), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""category"""', 'type': '"""str"""', 'description': '"""One of TANA_NODE or TANA_TEXT\nTANA_NODE means that this is a top-level topic in my Tana notebook\nTANA_TEXT means this is detailed information as part of a topic, identfied by topic_id metadata.\nDo NOT use category to query the index. Only use category to enrich your understanding of the result.\nDO NOT reference category in your responses.\n"""'}), '(name=\'category\', type=\'str\', description=\n """One of TANA_NODE or TANA_TEXT\nTANA_NODE means that this is a top-level topic in my Tana notebook\nTANA_TEXT means this is detailed information as part of a topic, identfied by topic_id metadata.\nDo NOT use category to query the index. Only use category to enrich your understanding of the result.\nDO NOT reference category in your responses.\n"""\n )\n', (3545, 3945), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((4101, 4452), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""topic_id"""', 'type': '"""str"""', 'description': '"""Identifies the Tana Notebook Node that this text is part of. Should be used as a reference to the notebook entry.\nOnly use topic_id to query the index when you want a single specific node by reference.\nYou can use topic_id when referencing a Tana Notebook Node in your responses.\n"""'}), '(name=\'topic_id\', type=\'str\', description=\n """Identifies the Tana Notebook Node that this text is part of. Should be used as a reference to the notebook entry.\nOnly use topic_id to query the index when you want a single specific node by reference.\nYou can use topic_id when referencing a Tana Notebook Node in your responses.\n"""\n )\n', (4113, 4452), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((4568, 4900), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""tana_id"""', 'type': '"""str"""', 'description': '"""The Tana Notebook Node for this piece of text. Should be used a reference to the notebook entry.\nOnly use topic_id to query the index when you want a single specific node by reference.\nYou can use tana_id when referencing a Tana Notebook Node in your responses.\n"""'}), '(name=\'tana_id\', type=\'str\', description=\n """The Tana Notebook Node for this piece of text. Should be used a reference to the notebook entry.\nOnly use topic_id to query the index when you want a single specific node by reference.\nYou can use tana_id when referencing a Tana Notebook Node in your responses.\n"""\n )\n', (4580, 4900), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((5016, 5403), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""supertag"""', 'type': '"""str"""', 'description': '"""One or more optional GENERAL semantic ontology tags for this Tana Notebook Node.\nDelimited by spaces (NOT a LIST. Do not use IN operator to test membership)\nExample: \n{ supertag: #task #topic #person #meeting }\nDo NOT use supertags to query the index. Only use supertags to enrich your understanding of the result.\n"""'}), '(name=\'supertag\', type=\'str\', description=\n """One or more optional GENERAL semantic ontology tags for this Tana Notebook Node.\nDelimited by spaces (NOT a LIST. Do not use IN operator to test membership)\nExample: \n{ supertag: #task #topic #person #meeting }\nDo NOT use supertags to query the index. Only use supertags to enrich your understanding of the result.\n"""\n )\n', (5028, 5403), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((9472, 9523), 'llama_index.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'new_text_node', 'score': 'node.score'}), '(node=new_text_node, score=node.score)\n', (9485, 9523), False, 'from llama_index.schema import TextNode, NodeRelationship, RelatedNodeInfo, MetadataMode, NodeWithScore\n')] |
from dotenv import load_dotenv
import cv2
import numpy as np
import os
import streamlit as st
from llama_index import SimpleDirectoryReader
from pydantic_llm import (
pydantic_llm,
DamagedParts,
damages_initial_prompt_str,
ConditionsReport,
conditions_report_initial_prompt_str,
)
import pandas as pd
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from car_colorizer import process_car_parts
import requests
from io import BytesIO
from streamlit_modal import Modal
import streamlit.components.v1 as components
modal = Modal("Damage Report", key="demo", max_width=1280)
api_url = "https://dmg-decoder.up.railway.app"
def create_report(data={"test": "123"}):
url = f"{api_url}/api/create_report"
response = requests.post(
url, json=data, headers={"Content-Type": "application/json"}
)
json = response.json()
print(json)
return json["id"]
load_dotenv()
states_names = ["front_image", "back_image", "left_image", "right_image", "report_id"]
openai_mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview")
# Remove form border and padding styles
css = r"""
<style>
[data-testid="stForm"] {border: 0px;padding:0px}
</style>
"""
st.markdown(css, unsafe_allow_html=True)
for state_name in states_names:
if state_name not in st.session_state:
st.session_state[state_name] = None
st.title("Damage Decoder")
st.subheader("Upload your car crash pictures")
def create_drag_and_drop(state_name, label):
st.session_state[state_name] = st.file_uploader(
label=label, key=f"{state_name}_image"
)
if st.session_state[state_name] is not None:
css = f"""
<style>
[aria-label="{label}"] {{display: none;}}
</style>
"""
st.markdown(css, unsafe_allow_html=True)
file_bytes = np.asarray(
bytearray(st.session_state[state_name].read()), dtype=np.uint8
)
opencv_image = cv2.imdecode(file_bytes, 1)
st.image(opencv_image, channels="BGR")
col1, col2 = st.columns(2)
with col1:
create_drag_and_drop("front_image", "Front Image")
create_drag_and_drop("right_image", "Left Image")
with col2:
create_drag_and_drop("back_image", "Back Image")
create_drag_and_drop("left_image", "Right Image")
def save_image(state_name):
path = os.path.join(os.getcwd(), "images")
if not os.path.exists(path):
os.makedirs(path)
if st.session_state[state_name] is not None:
with open(os.path.join(path, f"{state_name}.jpg"), "wb") as f:
f.write(st.session_state[state_name].getbuffer())
def delete_image(state_name):
path = os.path.join(os.getcwd(), "images")
if st.session_state[state_name] is not None and os.path.exists(
os.path.join(path, f"{state_name}.jpg")
):
os.remove(os.path.join(path, f"{state_name}.jpg"))
with st.form(key="car_form"):
selected_make = st.selectbox(
"Select your car make",
("Ford", "Subaru", "BMW", "Mercedes", "Volkswagen", "Volvo"),
)
selected_model = st.selectbox(
"Select your car model",
("Mustang", "Outback", "X3", "C-Class", "Golf", "XC60"),
)
selected_year = st.selectbox(
"Select your car year",
("2007", "2010", "2011", "2012", "2013", "2014"),
)
selected_llm_model = st.selectbox(
"Select LLM model",
("Gemini", "OpenAI"),
)
submit_button = st.form_submit_button(label="Submit")
if submit_button:
with st.spinner("Processing..."):
for state_name in states_names:
save_image(state_name)
path = os.path.join(os.getcwd(), "images")
image_documents = SimpleDirectoryReader(path).load_data()
conditions_report_response = pydantic_llm(
output_class=ConditionsReport,
image_documents=image_documents,
prompt_template_str=conditions_report_initial_prompt_str.format(
make_name=selected_make, model_name=selected_model, year=selected_year
),
selected_llm_model=selected_llm_model,
)
for state_name in states_names:
delete_image(state_name)
request_data = []
for part, condition in dict(conditions_report_response).items():
request_data.append({"part": part, "condition": condition})
id = create_report(
data={
"conditions_report": request_data,
"car_name": f"{selected_make} {selected_model} {selected_year}",
}
)
st.session_state["report_id"] = id
car_sides = ["front", "back", "left", "right"]
import boto3
s3 = boto3.resource("s3")
for side in car_sides:
colored_side = process_car_parts(dict(conditions_report_response), side)
in_memory_file = BytesIO()
colored_side.save(in_memory_file, format="PNG")
in_memory_file.seek(0)
s3.Bucket("elastic-llm").put_object(
Key=f"{id}/colored_car_{side}.png",
Body=in_memory_file,
)
modal.open()
if modal.is_open():
with modal.container():
st.markdown(
f"<a href='{api_url}/report/{st.session_state['report_id']}' target='_blank'>Go to report</a>",
unsafe_allow_html=True,
)
st.code(f"{api_url}/report/{st.session_state['report_id']}", language="python")
html_string = f"""
<div style="max-height:350px;overflow-y:auto;overflow-x:hidden">
<iframe style="overflow-x:hidden" src="{api_url}/report/{st.session_state['report_id']}" width="100%" height="960px"></iframe>
</div>
"""
components.html(html_string, height=350)
# st.subheader("Summary")
# st.write(damages_response.summary)
# st.subheader("Damaged Parts")
# df = pd.DataFrame.from_records(
# [part.model_dump() for part in damages_response.damaged_parts]
# )
# st.dataframe(df)
# TODO: look for the parts in the vector store
# filters = MetadataFilters(
# filters=[
# MetadataFilter(key="make", value=selected_make),
# MetadataFilter(key="model", value=selected_model),
# MetadataFilter(key="year", value=selected_year),
# ]
# )
# retriever = VectorStoreIndex.from_vector_store(vector_store).as_retriever(
# filters=filters,
# )
# query_engine = RetrieverQueryEngine(
# retriever=retriever,
# )
| [
"llama_index.multi_modal_llms.openai.OpenAIMultiModal",
"llama_index.SimpleDirectoryReader"
] | [((557, 607), 'streamlit_modal.Modal', 'Modal', (['"""Damage Report"""'], {'key': '"""demo"""', 'max_width': '(1280)'}), "('Damage Report', key='demo', max_width=1280)\n", (562, 607), False, 'from streamlit_modal import Modal\n'), ((912, 925), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (923, 925), False, 'from dotenv import load_dotenv\n'), ((1032, 1078), 'llama_index.multi_modal_llms.openai.OpenAIMultiModal', 'OpenAIMultiModal', ([], {'model': '"""gpt-4-vision-preview"""'}), "(model='gpt-4-vision-preview')\n", (1048, 1078), False, 'from llama_index.multi_modal_llms.openai import OpenAIMultiModal\n'), ((1217, 1257), 'streamlit.markdown', 'st.markdown', (['css'], {'unsafe_allow_html': '(True)'}), '(css, unsafe_allow_html=True)\n', (1228, 1257), True, 'import streamlit as st\n'), ((1381, 1407), 'streamlit.title', 'st.title', (['"""Damage Decoder"""'], {}), "('Damage Decoder')\n", (1389, 1407), True, 'import streamlit as st\n'), ((1410, 1456), 'streamlit.subheader', 'st.subheader', (['"""Upload your car crash pictures"""'], {}), "('Upload your car crash pictures')\n", (1422, 1456), True, 'import streamlit as st\n'), ((2070, 2083), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (2080, 2083), True, 'import streamlit as st\n'), ((755, 830), 'requests.post', 'requests.post', (['url'], {'json': 'data', 'headers': "{'Content-Type': 'application/json'}"}), "(url, json=data, headers={'Content-Type': 'application/json'})\n", (768, 830), False, 'import requests\n'), ((1539, 1595), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': 'label', 'key': 'f"""{state_name}_image"""'}), "(label=label, key=f'{state_name}_image')\n", (1555, 1595), True, 'import streamlit as st\n'), ((2911, 2934), 'streamlit.form', 'st.form', ([], {'key': '"""car_form"""'}), "(key='car_form')\n", (2918, 2934), True, 'import streamlit as st\n'), ((2956, 3058), 'streamlit.selectbox', 'st.selectbox', (['"""Select your car make"""', "('Ford', 'Subaru', 'BMW', 'Mercedes', 'Volkswagen', 'Volvo')"], {}), "('Select your car make', ('Ford', 'Subaru', 'BMW', 'Mercedes',\n 'Volkswagen', 'Volvo'))\n", (2968, 3058), True, 'import streamlit as st\n'), ((3100, 3198), 'streamlit.selectbox', 'st.selectbox', (['"""Select your car model"""', "('Mustang', 'Outback', 'X3', 'C-Class', 'Golf', 'XC60')"], {}), "('Select your car model', ('Mustang', 'Outback', 'X3',\n 'C-Class', 'Golf', 'XC60'))\n", (3112, 3198), True, 'import streamlit as st\n'), ((3239, 3329), 'streamlit.selectbox', 'st.selectbox', (['"""Select your car year"""', "('2007', '2010', '2011', '2012', '2013', '2014')"], {}), "('Select your car year', ('2007', '2010', '2011', '2012',\n '2013', '2014'))\n", (3251, 3329), True, 'import streamlit as st\n'), ((3375, 3429), 'streamlit.selectbox', 'st.selectbox', (['"""Select LLM model"""', "('Gemini', 'OpenAI')"], {}), "('Select LLM model', ('Gemini', 'OpenAI'))\n", (3387, 3429), True, 'import streamlit as st\n'), ((3474, 3511), 'streamlit.form_submit_button', 'st.form_submit_button', ([], {'label': '"""Submit"""'}), "(label='Submit')\n", (3495, 3511), True, 'import streamlit as st\n'), ((1798, 1838), 'streamlit.markdown', 'st.markdown', (['css'], {'unsafe_allow_html': '(True)'}), '(css, unsafe_allow_html=True)\n', (1809, 1838), True, 'import streamlit as st\n'), ((1980, 2007), 'cv2.imdecode', 'cv2.imdecode', (['file_bytes', '(1)'], {}), '(file_bytes, 1)\n', (1992, 2007), False, 'import cv2\n'), ((2016, 2054), 'streamlit.image', 'st.image', (['opencv_image'], {'channels': '"""BGR"""'}), "(opencv_image, channels='BGR')\n", (2024, 2054), True, 'import streamlit as st\n'), ((2378, 2389), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2387, 2389), False, 'import os\n'), ((2412, 2432), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2426, 2432), False, 'import os\n'), ((2442, 2459), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2453, 2459), False, 'import os\n'), ((2699, 2710), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2708, 2710), False, 'import os\n'), ((3540, 3567), 'streamlit.spinner', 'st.spinner', (['"""Processing..."""'], {}), "('Processing...')\n", (3550, 3567), True, 'import streamlit as st\n'), ((4732, 4752), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (4746, 4752), False, 'import boto3\n'), ((5235, 5376), 'streamlit.markdown', 'st.markdown', (['f"""<a href=\'{api_url}/report/{st.session_state[\'report_id\']}\' target=\'_blank\'>Go to report</a>"""'], {'unsafe_allow_html': '(True)'}), '(\n f"<a href=\'{api_url}/report/{st.session_state[\'report_id\']}\' target=\'_blank\'>Go to report</a>"\n , unsafe_allow_html=True)\n', (5246, 5376), True, 'import streamlit as st\n'), ((5411, 5490), 'streamlit.code', 'st.code', (['f"""{api_url}/report/{st.session_state[\'report_id\']}"""'], {'language': '"""python"""'}), '(f"{api_url}/report/{st.session_state[\'report_id\']}", language=\'python\')\n', (5418, 5490), True, 'import streamlit as st\n'), ((5778, 5818), 'streamlit.components.v1.html', 'components.html', (['html_string'], {'height': '(350)'}), '(html_string, height=350)\n', (5793, 5818), True, 'import streamlit.components.v1 as components\n'), ((2798, 2837), 'os.path.join', 'os.path.join', (['path', 'f"""{state_name}.jpg"""'], {}), "(path, f'{state_name}.jpg')\n", (2810, 2837), False, 'import os\n'), ((2863, 2902), 'os.path.join', 'os.path.join', (['path', 'f"""{state_name}.jpg"""'], {}), "(path, f'{state_name}.jpg')\n", (2875, 2902), False, 'import os\n'), ((3672, 3683), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3681, 3683), False, 'import os\n'), ((4899, 4908), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (4906, 4908), False, 'from io import BytesIO\n'), ((2528, 2567), 'os.path.join', 'os.path.join', (['path', 'f"""{state_name}.jpg"""'], {}), "(path, f'{state_name}.jpg')\n", (2540, 2567), False, 'import os\n'), ((3722, 3749), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {}), '(path)\n', (3743, 3749), False, 'from llama_index import SimpleDirectoryReader\n'), ((3934, 4053), 'pydantic_llm.conditions_report_initial_prompt_str.format', 'conditions_report_initial_prompt_str.format', ([], {'make_name': 'selected_make', 'model_name': 'selected_model', 'year': 'selected_year'}), '(make_name=selected_make,\n model_name=selected_model, year=selected_year)\n', (3977, 4053), False, 'from pydantic_llm import pydantic_llm, DamagedParts, damages_initial_prompt_str, ConditionsReport, conditions_report_initial_prompt_str\n')] |
from typing import TYPE_CHECKING, Any, Optional
from llama_index.legacy.core.base_query_engine import BaseQueryEngine
if TYPE_CHECKING:
from llama_index.legacy.langchain_helpers.agents.tools import (
LlamaIndexTool,
)
from llama_index.legacy.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
DEFAULT_NAME = "query_engine_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and get back a natural language response.
"""
class QueryEngineTool(AsyncBaseTool):
"""Query engine tool.
A tool making use of a query engine.
Args:
query_engine (BaseQueryEngine): A query engine.
metadata (ToolMetadata): The associated metadata of the query engine.
"""
def __init__(
self,
query_engine: BaseQueryEngine,
metadata: ToolMetadata,
resolve_input_errors: bool = True,
) -> None:
self._query_engine = query_engine
self._metadata = metadata
self._resolve_input_errors = resolve_input_errors
@classmethod
def from_defaults(
cls,
query_engine: BaseQueryEngine,
name: Optional[str] = None,
description: Optional[str] = None,
resolve_input_errors: bool = True,
) -> "QueryEngineTool":
name = name or DEFAULT_NAME
description = description or DEFAULT_DESCRIPTION
metadata = ToolMetadata(name=name, description=description)
return cls(
query_engine=query_engine,
metadata=metadata,
resolve_input_errors=resolve_input_errors,
)
@property
def query_engine(self) -> BaseQueryEngine:
return self._query_engine
@property
def metadata(self) -> ToolMetadata:
return self._metadata
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
if args is not None and len(args) > 0:
query_str = str(args[0])
elif kwargs is not None and "input" in kwargs:
# NOTE: this assumes our default function schema of `input`
query_str = kwargs["input"]
elif kwargs is not None and self._resolve_input_errors:
query_str = str(kwargs)
else:
raise ValueError(
"Cannot call query engine without specifying `input` parameter."
)
response = self._query_engine.query(query_str)
return ToolOutput(
content=str(response),
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=response,
)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
if args is not None and len(args) > 0:
query_str = str(args[0])
elif kwargs is not None and "input" in kwargs:
# NOTE: this assumes our default function schema of `input`
query_str = kwargs["input"]
elif kwargs is not None and self._resolve_input_errors:
query_str = str(kwargs)
else:
raise ValueError("Cannot call query engine without inputs")
response = await self._query_engine.aquery(query_str)
return ToolOutput(
content=str(response),
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=response,
)
def as_langchain_tool(self) -> "LlamaIndexTool":
from llama_index.legacy.langchain_helpers.agents.tools import (
IndexToolConfig,
LlamaIndexTool,
)
tool_config = IndexToolConfig(
query_engine=self.query_engine,
name=self.metadata.name,
description=self.metadata.description,
)
return LlamaIndexTool.from_tool_config(tool_config=tool_config)
| [
"llama_index.legacy.langchain_helpers.agents.tools.LlamaIndexTool.from_tool_config",
"llama_index.legacy.tools.types.ToolMetadata",
"llama_index.legacy.langchain_helpers.agents.tools.IndexToolConfig"
] | [((1408, 1456), 'llama_index.legacy.tools.types.ToolMetadata', 'ToolMetadata', ([], {'name': 'name', 'description': 'description'}), '(name=name, description=description)\n', (1420, 1456), False, 'from llama_index.legacy.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput\n'), ((3568, 3683), 'llama_index.legacy.langchain_helpers.agents.tools.IndexToolConfig', 'IndexToolConfig', ([], {'query_engine': 'self.query_engine', 'name': 'self.metadata.name', 'description': 'self.metadata.description'}), '(query_engine=self.query_engine, name=self.metadata.name,\n description=self.metadata.description)\n', (3583, 3683), False, 'from llama_index.legacy.langchain_helpers.agents.tools import IndexToolConfig, LlamaIndexTool\n'), ((3742, 3798), 'llama_index.legacy.langchain_helpers.agents.tools.LlamaIndexTool.from_tool_config', 'LlamaIndexTool.from_tool_config', ([], {'tool_config': 'tool_config'}), '(tool_config=tool_config)\n', (3773, 3798), False, 'from llama_index.legacy.langchain_helpers.agents.tools import IndexToolConfig, LlamaIndexTool\n')] |
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex
async def main():
# DOWNLOAD LLAMADATASET
rag_dataset, documents = download_llama_dataset(
"EvaluatingLlmSurveyPaperDataset", "./data"
)
# BUILD BASIC RAG PIPELINE
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
# EVALUATE WITH PACK
RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack")
rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)
############################################################################
# NOTE: If have a lower tier subscription for OpenAI API like Usage Tier 1 #
# then you'll need to use different batch_size and sleep_time_in_seconds. #
# For Usage Tier 1, settings that seemed to work well were batch_size=5, #
# and sleep_time_in_seconds=15 (as of December 2023.) #
############################################################################
benchmark_df = await rag_evaluator.arun(
batch_size=20, # batches the number of openai api calls to make
sleep_time_in_seconds=1, # number of seconds sleep before making an api call
)
print(benchmark_df)
if __name__ == "__main__":
main()
| [
"llama_index.core.llama_dataset.download_llama_dataset",
"llama_index.core.llama_pack.download_llama_pack",
"llama_index.core.VectorStoreIndex.from_documents"
] | [((249, 316), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""EvaluatingLlmSurveyPaperDataset"""', '"""./data"""'], {}), "('EvaluatingLlmSurveyPaperDataset', './data')\n", (271, 316), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((375, 427), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (406, 427), False, 'from llama_index.core import VectorStoreIndex\n'), ((520, 569), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack"""'], {}), "('RagEvaluatorPack', './pack')\n", (539, 569), False, 'from llama_index.core.llama_pack import download_llama_pack\n')] |
# !pip install llama-index faiss-cpu llama-index-vector-stores-faiss
import faiss
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
)
from llama_index.vector_stores.faiss import FaissVectorStore
from llama_index.core import get_response_synthesizer
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.prompts.prompt_type import PromptType
if __name__ == "__main__":
import os
# Instructions:
# Run the script with the following command: python constrained_rag.py
# Ensure to have the products directory in the same directory as this script
# Ensure to have the OPENAI_API_KEY environment variable set
assert os.getenv("OPENAI_API_KEY") is not None, "Please set OPENAI_API_KEY"
# load document vectors
documents = SimpleDirectoryReader("products/").load_data()
# load faiss index
d = 1536 # dimension of the vectors
faiss_index = faiss.IndexFlatL2(d)
# create vector store
vector_store = FaissVectorStore(faiss_index=faiss_index)
# initialize storage context
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# create index
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
# Configure retriever
retriever = VectorIndexRetriever(index=index, similarity_top_k=1)
QA_PROMPT_TMPL = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given only the context information and no prior knowledge, "
"answer the query.\n"
"Query: {query_str}\n"
"Answer: "
"Otherwise, state: I cannot answer."
)
STRICT_QA_PROMPT = PromptTemplate(
QA_PROMPT_TMPL, prompt_type=PromptType.QUESTION_ANSWER
)
# Configure response synthesizer
response_synthesizer = get_response_synthesizer(
structured_answer_filtering=True,
response_mode="refine",
text_qa_template=STRICT_QA_PROMPT,
)
# Assemble query engine
safe_query_engine = RetrieverQueryEngine(
retriever=retriever, response_synthesizer=response_synthesizer
)
# Execute query and evaluate response
print(safe_query_engine.query("describe a summer dress with price"))
print(safe_query_engine.query("describe a horse"))
| [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.retrievers.VectorIndexRetriever",
"llama_index.core.prompts.base.PromptTemplate",
"llama_index.core.query_engine.RetrieverQueryEngine",
"llama_index.vector_stores.faiss.FaissVectorStore",
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.get_response_synthesizer"
] | [((1083, 1103), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['d'], {}), '(d)\n', (1100, 1103), False, 'import faiss\n'), ((1150, 1191), 'llama_index.vector_stores.faiss.FaissVectorStore', 'FaissVectorStore', ([], {'faiss_index': 'faiss_index'}), '(faiss_index=faiss_index)\n', (1166, 1191), False, 'from llama_index.vector_stores.faiss import FaissVectorStore\n'), ((1247, 1302), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1275, 1302), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext\n'), ((1334, 1409), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context'}), '(documents, storage_context=storage_context)\n', (1365, 1409), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext\n'), ((1453, 1506), 'llama_index.core.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(1)'}), '(index=index, similarity_top_k=1)\n', (1473, 1506), False, 'from llama_index.core.retrievers import VectorIndexRetriever\n'), ((1891, 1961), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['QA_PROMPT_TMPL'], {'prompt_type': 'PromptType.QUESTION_ANSWER'}), '(QA_PROMPT_TMPL, prompt_type=PromptType.QUESTION_ANSWER)\n', (1905, 1961), False, 'from llama_index.core.prompts.base import PromptTemplate\n'), ((2041, 2163), 'llama_index.core.get_response_synthesizer', 'get_response_synthesizer', ([], {'structured_answer_filtering': '(True)', 'response_mode': '"""refine"""', 'text_qa_template': 'STRICT_QA_PROMPT'}), "(structured_answer_filtering=True, response_mode=\n 'refine', text_qa_template=STRICT_QA_PROMPT)\n", (2065, 2163), False, 'from llama_index.core import get_response_synthesizer\n'), ((2243, 2332), 'llama_index.core.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer)\n', (2263, 2332), False, 'from llama_index.core.query_engine import RetrieverQueryEngine\n'), ((839, 866), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (848, 866), False, 'import os\n'), ((953, 987), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""products/"""'], {}), "('products/')\n", (974, 987), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext\n')] |
from dotenv import load_dotenv
from llama_index.llms import OpenAI
from llama_index.prompts import PromptTemplate
from retriever import run_retrieval
import nest_asyncio
import asyncio
nest_asyncio.apply()
async def acombine_results(
texts,
query_str,
qa_prompt,
llm,
cur_prompt_list,
num_children,
):
fmt_prompts = []
for idx in range(0, len(texts), num_children):
text_batch = texts[idx : idx + num_children]
context_str = "\n\n".join([t for t in text_batch])
fmt_qa_prompt = qa_prompt.format(context_str=context_str, query_str=query_str)
# print(f"*****Prompt******:\n{fmt_qa_prompt}\n\n")
fmt_prompts.append(fmt_qa_prompt)
cur_prompt_list.append(fmt_qa_prompt)
tasks = [llm.acomplete(p) for p in fmt_prompts]
combined_responses = await asyncio.gather(*tasks)
new_texts = [str(r) for r in combined_responses]
if len(new_texts) == 1:
return new_texts[0]
else:
return await acombine_results(
new_texts,
query_str,
qa_prompt,
llm,
cur_prompt_list,
num_children=num_children,
)
async def agenerate_response_hs(retrieved_nodes, query_str, qa_prompt, llm):
"""Generate a response using hierarchical summarization strategy.
Combine num_children nodes hierarchically until we get one root node.
"""
fmt_prompts = []
node_responses = []
for node in retrieved_nodes:
context_str = str(node.metadata) + "\n" + node.get_content()
fmt_qa_prompt = qa_prompt.format(context_str=context_str, query_str=query_str)
print(f"*****Prompt******:\n{fmt_qa_prompt}\n\n")
fmt_prompts.append(fmt_qa_prompt)
tasks = [llm.acomplete(p) for p in fmt_prompts]
node_responses = await asyncio.gather(*tasks)
response_txt = await acombine_results(
[str(r) for r in node_responses],
query_str,
qa_prompt,
llm,
fmt_prompts,
num_children=10,
)
return response_txt, fmt_prompts
async def run_synthesizer(query_str):
llm = OpenAI(model_name="gpt-3.5-turbo")
qa_prompt = PromptTemplate(
"""\
Your are a personal assistant that should answer a query based on the users obsidian notes.
The context information from these notes is below.
---------------------
{context_str}
---------------------
Provide a response based on the context provided, without fabricating information.
If you lack the necessary information, simply state 'I don't know.'
You may include additional information in your response,
but clearly indicate that it is a personal assistant's addition.
Query: {query_str}
Answer: \
"""
)
retrieved_nodes = run_retrieval(query_str)
# context_str = "\n\n".join(
# ["%s\n%s" % (str(r.metadata), r.get_content()) for r in retrieved_nodes]
# )
# fmt_qa_prompt = qa_prompt.format(context_str=context_str, query_str=query_str)
# response = llm.complete(fmt_qa_prompt)
response, fmt_prompts = await agenerate_response_hs(
retrieved_nodes, query_str, qa_prompt, llm
)
# print(f"*****Prompt******:\n{fmt_prompts}\n\n")
print(f"*****Response******:\n{response}\n\n")
return str(response)
if __name__ == "__main__":
load_dotenv()
response = run_synthesizer("Write a technical Web3 blog post in my style.")
# print(f"*****Response******:\n{response}\n\n")
| [
"llama_index.llms.OpenAI",
"llama_index.prompts.PromptTemplate"
] | [((189, 209), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (207, 209), False, 'import nest_asyncio\n'), ((2126, 2160), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""'}), "(model_name='gpt-3.5-turbo')\n", (2132, 2160), False, 'from llama_index.llms import OpenAI\n'), ((2177, 2807), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['""" Your are a personal assistant that should answer a query based on the users obsidian notes. \n The context information from these notes is below.\n ---------------------\n {context_str}\n ---------------------\n Provide a response based on the context provided, without fabricating information.\n If you lack the necessary information, simply state \'I don\'t know.\'\n You may include additional information in your response,\n but clearly indicate that it is a personal assistant\'s addition.\n Query: {query_str}\n Answer: """'], {}), '(\n """ Your are a personal assistant that should answer a query based on the users obsidian notes. \n The context information from these notes is below.\n ---------------------\n {context_str}\n ---------------------\n Provide a response based on the context provided, without fabricating information.\n If you lack the necessary information, simply state \'I don\'t know.\'\n You may include additional information in your response,\n but clearly indicate that it is a personal assistant\'s addition.\n Query: {query_str}\n Answer: """\n )\n', (2191, 2807), False, 'from llama_index.prompts import PromptTemplate\n'), ((2839, 2863), 'retriever.run_retrieval', 'run_retrieval', (['query_str'], {}), '(query_str)\n', (2852, 2863), False, 'from retriever import run_retrieval\n'), ((3396, 3409), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (3407, 3409), False, 'from dotenv import load_dotenv\n'), ((835, 857), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (849, 857), False, 'import asyncio\n'), ((1826, 1848), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (1840, 1848), False, 'import asyncio\n')] |
from pathlib import Path
from llama_index import download_loader
from llama_index import SimpleDirectoryReader
PDFReader = download_loader("PDFReader")
def getdocument(filename : str,filetype:str):
if filetype == "pdf":
loader = PDFReader()
elif filetype == "txt":
loader = SimpleDirectoryReader('./example')
document = loader.load_data(file=Path(filename))
return document | [
"llama_index.download_loader",
"llama_index.SimpleDirectoryReader"
] | [((124, 152), 'llama_index.download_loader', 'download_loader', (['"""PDFReader"""'], {}), "('PDFReader')\n", (139, 152), False, 'from llama_index import download_loader\n'), ((300, 334), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./example"""'], {}), "('./example')\n", (321, 334), False, 'from llama_index import SimpleDirectoryReader\n'), ((380, 394), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (384, 394), False, 'from pathlib import Path\n')] |
import faiss
import openai
from llama_index.readers.file.epub_parser import EpubParser
# create an index with the text and save it to disk in data/indexes
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor
from langchain.chat_models import ChatOpenAI
from llama_index import GPTTreeIndex
import os
from llama_index import SummaryPrompt, QuestionAnswerPrompt
# set environment variable with OPENAI_API_KEY
os.environ["OPENAI_API_KEY"] = "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"
class Sage:
def __init__(self, model_name: str = "gpt-3.5-turbo", history = None):
"""
Initializes the Sage class with the given API key.
"""
self.model_name = model_name
self._index=None
self._docs = None
self.response = None
self.load_model()
def load_book(self, book_file_path_list: list = [""], book_dir_path: str = "") -> None:
"""
Loads the book document from the given file path and create index.
"""
self._docs = SimpleDirectoryReader(input_dir = book_dir_path, input_files = book_file_path_list).load_data()
self._index = GPTSimpleVectorIndex(documents=self._docs)
def load_model(self) -> None:
"""
Load the Open AI Model, book and index embeddings
"""
self.llm_predictor = LLMPredictor(llm=ChatOpenAI(model_name=self.model_name))
def run(self, query: str) -> str:
"""
Generate response.
"""
self.response = self._index.query(query,llm_predictor=self.llm_predictor,
similarity_top_k=3)
return f"<b>{self.response}</b>"
if __name__ == "__main__":
book_talker = Sage(model_name = "gpt-3.5-turbo")
book_talker.load_book(book_file_path_list = ["test_data/epubs/SeeingLikeAState/SeeingLikeAState.epub"])
print(book_talker.run('Summarize the book'))
| [
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader"
] | [((1175, 1217), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', ([], {'documents': 'self._docs'}), '(documents=self._docs)\n', (1195, 1217), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor\n'), ((1057, 1136), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'book_dir_path', 'input_files': 'book_file_path_list'}), '(input_dir=book_dir_path, input_files=book_file_path_list)\n', (1078, 1136), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor\n'), ((1389, 1427), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'self.model_name'}), '(model_name=self.model_name)\n', (1399, 1427), False, 'from langchain.chat_models import ChatOpenAI\n')] |
# -*- coding: utf-8 -*-
# @place: Pudong, Shanghai
# @file: query_rewrite_ensemble_retriever.py
# @time: 2023/12/28 13:49
# -*- coding: utf-8 -*-
# @place: Pudong, Shanghai
# @file: ensemble_retriever.py
# @time: 2023/12/26 18:50
import json
from typing import List
from operator import itemgetter
from llama_index.schema import TextNode
from llama_index.schema import NodeWithScore
from llama_index.retrievers import BaseRetriever
from llama_index.indices.query.schema import QueryType
from preprocess.get_text_id_mapping import text_node_id_mapping
from custom_retriever.bm25_retriever import CustomBM25Retriever
from custom_retriever.vector_store_retriever import VectorSearchRetriever
class QueryRewriteEnsembleRetriever(BaseRetriever):
def __init__(self, top_k, faiss_index):
super().__init__()
self.c: int = 60
self.faiss_index = faiss_index
self.top_k = top_k
self.embedding_retriever = VectorSearchRetriever(top_k=self.top_k, faiss_index=faiss_index, query_rewrite=True)
with open('../data/query_rewrite.json', 'r') as f:
self.query_write_dict = json.loads(f.read())
def _retrieve(self, query: QueryType) -> List[NodeWithScore]:
doc_lists = []
bm25_search_nodes = CustomBM25Retriever(top_k=self.top_k).retrieve(query.query_str)
doc_lists.append([node.text for node in bm25_search_nodes])
embedding_search_nodes = self.embedding_retriever.retrieve(query.query_str)
doc_lists.append([node.text for node in embedding_search_nodes])
# check: need query rewrite
if len(set([_.id_ for _ in bm25_search_nodes]) & set([_.id_ for _ in embedding_search_nodes])) == 0:
print(query.query_str)
for search_query in self.query_write_dict[query.query_str]:
bm25_search_nodes = CustomBM25Retriever(top_k=self.top_k).retrieve(search_query)
doc_lists.append([node.text for node in bm25_search_nodes])
embedding_search_nodes = self.embedding_retriever.retrieve(search_query)
doc_lists.append([node.text for node in embedding_search_nodes])
# Create a union of all unique documents in the input doc_lists
all_documents = set()
for doc_list in doc_lists:
for doc in doc_list:
all_documents.add(doc)
# print(all_documents)
# Initialize the RRF score dictionary for each document
rrf_score_dic = {doc: 0.0 for doc in all_documents}
# Calculate RRF scores for each document
for doc_list, weight in zip(doc_lists, [1/len(doc_lists)] * len(doc_lists)):
for rank, doc in enumerate(doc_list, start=1):
rrf_score = weight * (1 / (rank + self.c))
rrf_score_dic[doc] += rrf_score
# Sort documents by their RRF scores in descending order
sorted_documents = sorted(rrf_score_dic.items(), key=itemgetter(1), reverse=True)
result = []
for sorted_doc in sorted_documents[:self.top_k]:
text, score = sorted_doc
node_with_score = NodeWithScore(node=TextNode(text=text,
id_=text_node_id_mapping[text]),
score=score)
result.append(node_with_score)
return result
if __name__ == '__main__':
from faiss import IndexFlatIP
from pprint import pprint
faiss_index = IndexFlatIP(1536)
ensemble_retriever = QueryRewriteEnsembleRetriever(top_k=3, faiss_index=faiss_index)
query = "半导体制造设备市场美、日、荷各占多少份额?"
t_result = ensemble_retriever.retrieve(str_or_query_bundle=query)
pprint(t_result)
faiss_index.reset()
| [
"llama_index.schema.TextNode"
] | [((3476, 3493), 'faiss.IndexFlatIP', 'IndexFlatIP', (['(1536)'], {}), '(1536)\n', (3487, 3493), False, 'from faiss import IndexFlatIP\n'), ((3693, 3709), 'pprint.pprint', 'pprint', (['t_result'], {}), '(t_result)\n', (3699, 3709), False, 'from pprint import pprint\n'), ((942, 1030), 'custom_retriever.vector_store_retriever.VectorSearchRetriever', 'VectorSearchRetriever', ([], {'top_k': 'self.top_k', 'faiss_index': 'faiss_index', 'query_rewrite': '(True)'}), '(top_k=self.top_k, faiss_index=faiss_index,\n query_rewrite=True)\n', (963, 1030), False, 'from custom_retriever.vector_store_retriever import VectorSearchRetriever\n'), ((1261, 1298), 'custom_retriever.bm25_retriever.CustomBM25Retriever', 'CustomBM25Retriever', ([], {'top_k': 'self.top_k'}), '(top_k=self.top_k)\n', (1280, 1298), False, 'from custom_retriever.bm25_retriever import CustomBM25Retriever\n'), ((2939, 2952), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (2949, 2952), False, 'from operator import itemgetter\n'), ((3131, 3182), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'text', 'id_': 'text_node_id_mapping[text]'}), '(text=text, id_=text_node_id_mapping[text])\n', (3139, 3182), False, 'from llama_index.schema import TextNode\n'), ((1838, 1875), 'custom_retriever.bm25_retriever.CustomBM25Retriever', 'CustomBM25Retriever', ([], {'top_k': 'self.top_k'}), '(top_k=self.top_k)\n', (1857, 1875), False, 'from custom_retriever.bm25_retriever import CustomBM25Retriever\n')] |
"""Utils for jupyter notebook."""
import os
from io import BytesIO
from typing import Any, Dict, List, Tuple
import matplotlib.pyplot as plt
import requests
from IPython.display import Markdown, display
from llama_index.core.base.response.schema import Response
from llama_index.core.img_utils import b64_2_img
from llama_index.core.schema import ImageNode, MetadataMode, NodeWithScore
from llama_index.core.utils import truncate_text
from PIL import Image
DEFAULT_THUMBNAIL_SIZE = (512, 512)
DEFAULT_IMAGE_MATRIX = (3, 3)
DEFAULT_SHOW_TOP_K = 3
def display_image(img_str: str, size: Tuple[int, int] = DEFAULT_THUMBNAIL_SIZE) -> None:
"""Display base64 encoded image str as image for jupyter notebook."""
img = b64_2_img(img_str)
img.thumbnail(size)
display(img)
def display_image_uris(
image_paths: List[str],
image_matrix: Tuple[int, int] = DEFAULT_IMAGE_MATRIX,
top_k: int = DEFAULT_SHOW_TOP_K,
) -> None:
"""Display base64 encoded image str as image for jupyter notebook."""
images_shown = 0
plt.figure(figsize=(16, 9))
for img_path in image_paths[:top_k]:
if os.path.isfile(img_path):
image = Image.open(img_path)
plt.subplot(image_matrix[0], image_matrix[1], images_shown + 1)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
images_shown += 1
if images_shown >= image_matrix[0] * image_matrix[1]:
break
def display_source_node(
source_node: NodeWithScore,
source_length: int = 100,
show_source_metadata: bool = False,
metadata_mode: MetadataMode = MetadataMode.NONE,
) -> None:
"""Display source node for jupyter notebook."""
source_text_fmt = truncate_text(
source_node.node.get_content(metadata_mode=metadata_mode).strip(), source_length
)
text_md = (
f"**Node ID:** {source_node.node.node_id}<br>"
f"**Similarity:** {source_node.score}<br>"
f"**Text:** {source_text_fmt}<br>"
)
if show_source_metadata:
text_md += f"**Metadata:** {source_node.node.metadata}<br>"
if isinstance(source_node.node, ImageNode):
text_md += "**Image:**"
display(Markdown(text_md))
if isinstance(source_node.node, ImageNode) and source_node.node.image is not None:
display_image(source_node.node.image)
def display_metadata(metadata: Dict[str, Any]) -> None:
"""Display metadata for jupyter notebook."""
display(metadata)
def display_response(
response: Response,
source_length: int = 100,
show_source: bool = False,
show_metadata: bool = False,
show_source_metadata: bool = False,
) -> None:
"""Display response for jupyter notebook."""
if response.response is None:
response_text = "None"
else:
response_text = response.response.strip()
display(Markdown(f"**`Final Response:`** {response_text}"))
if show_source:
for ind, source_node in enumerate(response.source_nodes):
display(Markdown("---"))
display(
Markdown(f"**`Source Node {ind + 1}/{len(response.source_nodes)}`**")
)
display_source_node(
source_node,
source_length=source_length,
show_source_metadata=show_source_metadata,
)
if show_metadata:
if response.metadata is not None:
display_metadata(response.metadata)
def display_query_and_multimodal_response(
query_str: str, response: Response, plot_height: int = 2, plot_width: int = 5
) -> None:
"""For displaying a query and its multi-modal response."""
if response.metadata:
image_nodes = response.metadata["image_nodes"] or []
else:
image_nodes = []
num_subplots = len(image_nodes)
f, axarr = plt.subplots(1, num_subplots)
f.set_figheight(plot_height)
f.set_figwidth(plot_width)
ix = 0
for ix, scored_img_node in enumerate(image_nodes):
img_node = scored_img_node.node
image = None
if img_node.image_url:
img_response = requests.get(img_node.image_url)
image = Image.open(BytesIO(img_response.content))
elif img_node.image_path:
image = Image.open(img_node.image_path).convert("RGB")
else:
raise ValueError(
"A retrieved image must have image_path or image_url specified."
)
if num_subplots > 1:
axarr[ix].imshow(image)
axarr[ix].set_title(f"Retrieved Position: {ix}", pad=10, fontsize=9)
else:
axarr.imshow(image)
axarr.set_title(f"Retrieved Position: {ix}", pad=10, fontsize=9)
f.tight_layout()
print(f"Query: {query_str}\n=======")
print(f"Retrieved Images:\n")
plt.show()
print("=======")
print(f"Response: {response.response}\n=======\n")
| [
"llama_index.core.img_utils.b64_2_img"
] | [((723, 741), 'llama_index.core.img_utils.b64_2_img', 'b64_2_img', (['img_str'], {}), '(img_str)\n', (732, 741), False, 'from llama_index.core.img_utils import b64_2_img\n'), ((770, 782), 'IPython.display.display', 'display', (['img'], {}), '(img)\n', (777, 782), False, 'from IPython.display import Markdown, display\n'), ((1042, 1069), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (1052, 1069), True, 'import matplotlib.pyplot as plt\n'), ((2470, 2487), 'IPython.display.display', 'display', (['metadata'], {}), '(metadata)\n', (2477, 2487), False, 'from IPython.display import Markdown, display\n'), ((3831, 3860), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', 'num_subplots'], {}), '(1, num_subplots)\n', (3843, 3860), True, 'import matplotlib.pyplot as plt\n'), ((4816, 4826), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4824, 4826), True, 'import matplotlib.pyplot as plt\n'), ((1122, 1146), 'os.path.isfile', 'os.path.isfile', (['img_path'], {}), '(img_path)\n', (1136, 1146), False, 'import os\n'), ((2207, 2224), 'IPython.display.Markdown', 'Markdown', (['text_md'], {}), '(text_md)\n', (2215, 2224), False, 'from IPython.display import Markdown, display\n'), ((2868, 2918), 'IPython.display.Markdown', 'Markdown', (['f"""**`Final Response:`** {response_text}"""'], {}), "(f'**`Final Response:`** {response_text}')\n", (2876, 2918), False, 'from IPython.display import Markdown, display\n'), ((1168, 1188), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1178, 1188), False, 'from PIL import Image\n'), ((1202, 1265), 'matplotlib.pyplot.subplot', 'plt.subplot', (['image_matrix[0]', 'image_matrix[1]', '(images_shown + 1)'], {}), '(image_matrix[0], image_matrix[1], images_shown + 1)\n', (1213, 1265), True, 'import matplotlib.pyplot as plt\n'), ((1278, 1295), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (1288, 1295), True, 'import matplotlib.pyplot as plt\n'), ((1308, 1322), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1318, 1322), True, 'import matplotlib.pyplot as plt\n'), ((1335, 1349), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1345, 1349), True, 'import matplotlib.pyplot as plt\n'), ((4110, 4142), 'requests.get', 'requests.get', (['img_node.image_url'], {}), '(img_node.image_url)\n', (4122, 4142), False, 'import requests\n'), ((3026, 3041), 'IPython.display.Markdown', 'Markdown', (['"""---"""'], {}), "('---')\n", (3034, 3041), False, 'from IPython.display import Markdown, display\n'), ((4174, 4203), 'io.BytesIO', 'BytesIO', (['img_response.content'], {}), '(img_response.content)\n', (4181, 4203), False, 'from io import BytesIO\n'), ((4259, 4290), 'PIL.Image.open', 'Image.open', (['img_node.image_path'], {}), '(img_node.image_path)\n', (4269, 4290), False, 'from PIL import Image\n')] |
from typing import Optional, Type
from llama_index.legacy.download.module import (
LLAMA_HUB_URL,
MODULE_TYPE,
download_llama_module,
track_download,
)
from llama_index.legacy.llama_pack.base import BaseLlamaPack
def download_llama_pack(
llama_pack_class: str,
download_dir: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = True,
skip_load: bool = False,
) -> Optional[Type[BaseLlamaPack]]:
"""Download a single LlamaPack from Llama Hub.
Args:
llama_pack_class: The name of the LlamaPack class you want to download,
such as `GmailOpenAIAgentPack`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
download_dir: Custom dirpath to download the pack into.
Returns:
A Loader.
"""
pack_cls = download_llama_module(
llama_pack_class,
llama_hub_url=llama_hub_url,
refresh_cache=refresh_cache,
custom_path=download_dir,
library_path="llama_packs/library.json",
disable_library_cache=True,
override_path=True,
skip_load=skip_load,
)
track_download(llama_pack_class, MODULE_TYPE.LLAMAPACK)
if pack_cls is None:
return None
if not issubclass(pack_cls, BaseLlamaPack):
raise ValueError(f"Tool class {pack_cls} must be a subclass of BaseToolSpec.")
return pack_cls
| [
"llama_index.legacy.download.module.download_llama_module",
"llama_index.legacy.download.module.track_download"
] | [((887, 1134), 'llama_index.legacy.download.module.download_llama_module', 'download_llama_module', (['llama_pack_class'], {'llama_hub_url': 'llama_hub_url', 'refresh_cache': 'refresh_cache', 'custom_path': 'download_dir', 'library_path': '"""llama_packs/library.json"""', 'disable_library_cache': '(True)', 'override_path': '(True)', 'skip_load': 'skip_load'}), "(llama_pack_class, llama_hub_url=llama_hub_url,\n refresh_cache=refresh_cache, custom_path=download_dir, library_path=\n 'llama_packs/library.json', disable_library_cache=True, override_path=\n True, skip_load=skip_load)\n", (908, 1134), False, 'from llama_index.legacy.download.module import LLAMA_HUB_URL, MODULE_TYPE, download_llama_module, track_download\n'), ((1196, 1251), 'llama_index.legacy.download.module.track_download', 'track_download', (['llama_pack_class', 'MODULE_TYPE.LLAMAPACK'], {}), '(llama_pack_class, MODULE_TYPE.LLAMAPACK)\n', (1210, 1251), False, 'from llama_index.legacy.download.module import LLAMA_HUB_URL, MODULE_TYPE, download_llama_module, track_download\n')] |
# Debug stuff
#import os
#import readline
#print("Current Working Directory:", os.getcwd())
#env_var = os.getenv('OPENAI_API_KEY')
#print(env_var)
# Sets llama-index
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import os.path
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage,
)
# check if storage already exists
PERSIST_DIR = "./python/.storage"
if not os.path.exists(PERSIST_DIR):
# load the documents and create the index
documents = SimpleDirectoryReader("python/data").load_data()
index = VectorStoreIndex.from_documents(documents)
# store it for later
index.storage_context.persist(persist_dir=PERSIST_DIR)
else:
# load the existing index
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
index = load_index_from_storage(storage_context)
# either way we can now query the index
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response) | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.SimpleDirectoryReader"
] | [((194, 253), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (213, 253), False, 'import logging\n'), ((285, 325), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (306, 325), False, 'import logging\n'), ((697, 739), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (728, 739), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((882, 935), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'PERSIST_DIR'}), '(persist_dir=PERSIST_DIR)\n', (910, 935), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((948, 988), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (971, 988), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((254, 273), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (271, 273), False, 'import logging\n'), ((636, 672), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""python/data"""'], {}), "('python/data')\n", (657, 672), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n')] |
import os, streamlit as st
# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add corresponding environment variable (recommended)
# os.environ['OPENAI_API_KEY']= ""
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper, ServiceContext
from langchain.llms.openai import OpenAI
# Define a simple Streamlit app
st.title("Ask Llama")
query = st.text_input("What would you like to ask? (source: data/Create.txt)", "")
# If the 'Submit' button is clicked
if st.button("Submit"):
if not query.strip():
st.error(f"Please provide the search query.")
else:
try:
# This example uses text-davinci-003 by default; feel free to change if desired
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003"))
# Configure prompt parameters and initialise helper
max_input_size = 4096
num_output = 256
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
# Load documents from the 'data' directory
documents = SimpleDirectoryReader('data').load_data()
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
response = index.query(query)
st.success(response)
except Exception as e:
st.error(f"An error occurred: {e}")
| [
"llama_index.PromptHelper",
"llama_index.GPTSimpleVectorIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader"
] | [((396, 417), 'streamlit.title', 'st.title', (['"""Ask Llama"""'], {}), "('Ask Llama')\n", (404, 417), True, 'import os, streamlit as st\n'), ((426, 500), 'streamlit.text_input', 'st.text_input', (['"""What would you like to ask? (source: data/Create.txt)"""', '""""""'], {}), "('What would you like to ask? (source: data/Create.txt)', '')\n", (439, 500), True, 'import os, streamlit as st\n'), ((541, 560), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (550, 560), True, 'import os, streamlit as st\n'), ((596, 641), 'streamlit.error', 'st.error', (['f"""Please provide the search query."""'], {}), "(f'Please provide the search query.')\n", (604, 641), True, 'import os, streamlit as st\n'), ((1048, 1107), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (1060, 1107), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper, ServiceContext\n'), ((1260, 1351), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (1288, 1351), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper, ServiceContext\n'), ((1367, 1446), 'llama_index.GPTSimpleVectorIndex.from_documents', 'GPTSimpleVectorIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1402, 1446), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper, ServiceContext\n'), ((1514, 1534), 'streamlit.success', 'st.success', (['response'], {}), '(response)\n', (1524, 1534), True, 'import os, streamlit as st\n'), ((1578, 1613), 'streamlit.error', 'st.error', (['f"""An error occurred: {e}"""'], {}), "(f'An error occurred: {e}')\n", (1586, 1613), True, 'import os, streamlit as st\n'), ((802, 854), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-davinci-003"""'}), "(temperature=0, model_name='text-davinci-003')\n", (808, 854), False, 'from langchain.llms.openai import OpenAI\n'), ((1188, 1217), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (1209, 1217), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper, ServiceContext\n')] |
import os, streamlit as st
# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add corresponding environment variable (recommended)
# os.environ['OPENAI_API_KEY']= ""
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper
from langchain import OpenAI
# This example uses text-davinci-003 by default; feel free to change if desired
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003"))
# Configure prompt parameters and initialise helper
max_input_size = 4096
num_output = 256
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
# Load documents from the 'data' directory
documents = SimpleDirectoryReader('data').load_data()
index = GPTSimpleVectorIndex(
documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
)
# Define a simple Streamlit app
st.title("Ask Llama")
query = st.text_input("What would you like to ask?", "")
if st.button("Submit"):
response = index.query(query)
st.write(response)
| [
"llama_index.PromptHelper",
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader"
] | [((635, 694), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (647, 694), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper\n'), ((801, 895), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(documents, llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (821, 895), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper\n'), ((930, 951), 'streamlit.title', 'st.title', (['"""Ask Llama"""'], {}), "('Ask Llama')\n", (938, 951), True, 'import os, streamlit as st\n'), ((960, 1008), 'streamlit.text_input', 'st.text_input', (['"""What would you like to ask?"""', '""""""'], {}), "('What would you like to ask?', '')\n", (973, 1008), True, 'import os, streamlit as st\n'), ((1013, 1032), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (1022, 1032), True, 'import os, streamlit as st\n'), ((1072, 1090), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (1080, 1090), True, 'import os, streamlit as st\n'), ((449, 501), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-davinci-003"""'}), "(temperature=0, model_name='text-davinci-003')\n", (455, 501), False, 'from langchain import OpenAI\n'), ((751, 780), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (772, 780), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper\n')] |
from typing import Any, Dict, List, Optional, Sequence, Tuple
from llama_index.core.base.response.schema import RESPONSE_TYPE, Response
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.multi_modal import MultiModalVectorIndexRetriever
from llama_index.core.indices.query.base import BaseQueryEngine
from llama_index.core.indices.query.schema import QueryBundle, QueryType
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts import BasePromptTemplate
from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from llama_index.core.prompts.mixin import PromptMixinType
from llama_index.core.schema import ImageNode, NodeWithScore
def _get_image_and_text_nodes(
nodes: List[NodeWithScore],
) -> Tuple[List[NodeWithScore], List[NodeWithScore]]:
image_nodes = []
text_nodes = []
for res_node in nodes:
if isinstance(res_node.node, ImageNode):
image_nodes.append(res_node)
else:
text_nodes.append(res_node)
return image_nodes, text_nodes
class SimpleMultiModalQueryEngine(BaseQueryEngine):
"""Simple Multi Modal Retriever query engine.
Assumes that retrieved text context fits within context window of LLM, along with images.
Args:
retriever (MultiModalVectorIndexRetriever): A retriever object.
multi_modal_llm (Optional[MultiModalLLM]): MultiModalLLM Models.
text_qa_template (Optional[BasePromptTemplate]): Text QA Prompt Template.
image_qa_template (Optional[BasePromptTemplate]): Image QA Prompt Template.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): Node Postprocessors.
callback_manager (Optional[CallbackManager]): A callback manager.
"""
def __init__(
self,
retriever: MultiModalVectorIndexRetriever,
multi_modal_llm: Optional[MultiModalLLM] = None,
text_qa_template: Optional[BasePromptTemplate] = None,
image_qa_template: Optional[BasePromptTemplate] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
self._retriever = retriever
if multi_modal_llm:
self._multi_modal_llm = multi_modal_llm
else:
try:
from llama_index.multi_modal_llms.openai import (
OpenAIMultiModal,
) # pants: no-infer-dep
self._multi_modal_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", max_new_tokens=1000
)
except ImportError as e:
raise ImportError(
"`llama-index-multi-modal-llms-openai` package cannot be found. "
"Please install it by using `pip install `llama-index-multi-modal-llms-openai`"
)
self._text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
self._image_qa_template = image_qa_template or DEFAULT_TEXT_QA_PROMPT
self._node_postprocessors = node_postprocessors or []
callback_manager = callback_manager or CallbackManager([])
for node_postprocessor in self._node_postprocessors:
node_postprocessor.callback_manager = callback_manager
super().__init__(callback_manager)
def _get_prompts(self) -> Dict[str, Any]:
"""Get prompts."""
return {"text_qa_template": self._text_qa_template}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
def _apply_node_postprocessors(
self, nodes: List[NodeWithScore], query_bundle: QueryBundle
) -> List[NodeWithScore]:
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return nodes
def retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = self._retriever.retrieve(query_bundle)
return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
async def aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = await self._retriever.aretrieve(query_bundle)
return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
def synthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
image_nodes, text_nodes = _get_image_and_text_nodes(nodes)
context_str = "\n\n".join([r.get_content() for r in text_nodes])
fmt_prompt = self._text_qa_template.format(
context_str=context_str, query_str=query_bundle.query_str
)
llm_response = self._multi_modal_llm.complete(
prompt=fmt_prompt,
image_documents=[image_node.node for image_node in image_nodes],
)
return Response(
response=str(llm_response),
source_nodes=nodes,
metadata={"text_nodes": text_nodes, "image_nodes": image_nodes},
)
def _get_response_with_images(
self,
prompt_str: str,
image_nodes: List[ImageNode],
) -> RESPONSE_TYPE:
fmt_prompt = self._image_qa_template.format(
query_str=prompt_str,
)
llm_response = self._multi_modal_llm.complete(
prompt=fmt_prompt,
image_documents=[image_node.node for image_node in image_nodes],
)
return Response(
response=str(llm_response),
source_nodes=image_nodes,
metadata={"image_nodes": image_nodes},
)
async def asynthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
image_nodes, text_nodes = _get_image_and_text_nodes(nodes)
context_str = "\n\n".join([r.get_content() for r in text_nodes])
fmt_prompt = self._text_qa_template.format(
context_str=context_str, query_str=query_bundle.query_str
)
llm_response = await self._multi_modal_llm.acomplete(
prompt=fmt_prompt,
image_documents=[image_node.node for image_node in image_nodes],
)
return Response(
response=str(llm_response),
source_nodes=nodes,
metadata={"text_nodes": text_nodes, "image_nodes": image_nodes},
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = self.retrieve(query_bundle)
retrieve_event.on_end(
payload={EventPayload.NODES: nodes},
)
response = self.synthesize(
query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
def image_query(self, image_path: QueryType, prompt_str: str) -> RESPONSE_TYPE:
"""Answer a image query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: str(image_path)}
) as query_event:
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: str(image_path)},
) as retrieve_event:
nodes = self._retriever.image_to_image_retrieve(image_path)
retrieve_event.on_end(
payload={EventPayload.NODES: nodes},
)
image_nodes, _ = _get_image_and_text_nodes(nodes)
response = self._get_response_with_images(
prompt_str=prompt_str,
image_nodes=image_nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = await self.aretrieve(query_bundle)
retrieve_event.on_end(
payload={EventPayload.NODES: nodes},
)
response = await self.asynthesize(
query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
@property
def retriever(self) -> MultiModalVectorIndexRetriever:
"""Get the retriever object."""
return self._retriever
| [
"llama_index.core.callbacks.base.CallbackManager",
"llama_index.multi_modal_llms.openai.OpenAIMultiModal"
] | [((3353, 3372), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (3368, 3372), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((2707, 2774), 'llama_index.multi_modal_llms.openai.OpenAIMultiModal', 'OpenAIMultiModal', ([], {'model': '"""gpt-4-vision-preview"""', 'max_new_tokens': '(1000)'}), "(model='gpt-4-vision-preview', max_new_tokens=1000)\n", (2723, 2774), False, 'from llama_index.multi_modal_llms.openai import OpenAIMultiModal\n')] |
import json
from typing import Sequence
from llama_index.legacy.prompts.base import PromptTemplate
from llama_index.legacy.question_gen.types import SubQuestion
from llama_index.legacy.tools.types import ToolMetadata
# deprecated, kept for backward compatibility
SubQuestionPrompt = PromptTemplate
def build_tools_text(tools: Sequence[ToolMetadata]) -> str:
tools_dict = {}
for tool in tools:
tools_dict[tool.name] = tool.description
return json.dumps(tools_dict, indent=4)
PREFIX = """\
Given a user question, and a list of tools, output a list of relevant sub-questions \
in json markdown that when composed can help answer the full user question:
"""
example_query_str = (
"Compare and contrast the revenue growth and EBITDA of Uber and Lyft for year 2021"
)
example_tools = [
ToolMetadata(
name="uber_10k",
description="Provides information about Uber financials for year 2021",
),
ToolMetadata(
name="lyft_10k",
description="Provides information about Lyft financials for year 2021",
),
]
example_tools_str = build_tools_text(example_tools)
example_output = [
SubQuestion(
sub_question="What is the revenue growth of Uber", tool_name="uber_10k"
),
SubQuestion(sub_question="What is the EBITDA of Uber", tool_name="uber_10k"),
SubQuestion(
sub_question="What is the revenue growth of Lyft", tool_name="lyft_10k"
),
SubQuestion(sub_question="What is the EBITDA of Lyft", tool_name="lyft_10k"),
]
example_output_str = json.dumps({"items": [x.dict() for x in example_output]}, indent=4)
EXAMPLES = f"""\
# Example 1
<Tools>
```json
{example_tools_str}
```
<User Question>
{example_query_str}
<Output>
```json
{example_output_str}
```
""".replace(
"{", "{{"
).replace(
"}", "}}"
)
SUFFIX = """\
# Example 2
<Tools>
```json
{tools_str}
```
<User Question>
{query_str}
<Output>
"""
DEFAULT_SUB_QUESTION_PROMPT_TMPL = PREFIX + EXAMPLES + SUFFIX
| [
"llama_index.legacy.question_gen.types.SubQuestion",
"llama_index.legacy.tools.types.ToolMetadata"
] | [((465, 497), 'json.dumps', 'json.dumps', (['tools_dict'], {'indent': '(4)'}), '(tools_dict, indent=4)\n', (475, 497), False, 'import json\n'), ((817, 923), 'llama_index.legacy.tools.types.ToolMetadata', 'ToolMetadata', ([], {'name': '"""uber_10k"""', 'description': '"""Provides information about Uber financials for year 2021"""'}), "(name='uber_10k', description=\n 'Provides information about Uber financials for year 2021')\n", (829, 923), False, 'from llama_index.legacy.tools.types import ToolMetadata\n'), ((947, 1053), 'llama_index.legacy.tools.types.ToolMetadata', 'ToolMetadata', ([], {'name': '"""lyft_10k"""', 'description': '"""Provides information about Lyft financials for year 2021"""'}), "(name='lyft_10k', description=\n 'Provides information about Lyft financials for year 2021')\n", (959, 1053), False, 'from llama_index.legacy.tools.types import ToolMetadata\n'), ((1150, 1239), 'llama_index.legacy.question_gen.types.SubQuestion', 'SubQuestion', ([], {'sub_question': '"""What is the revenue growth of Uber"""', 'tool_name': '"""uber_10k"""'}), "(sub_question='What is the revenue growth of Uber', tool_name=\n 'uber_10k')\n", (1161, 1239), False, 'from llama_index.legacy.question_gen.types import SubQuestion\n'), ((1254, 1330), 'llama_index.legacy.question_gen.types.SubQuestion', 'SubQuestion', ([], {'sub_question': '"""What is the EBITDA of Uber"""', 'tool_name': '"""uber_10k"""'}), "(sub_question='What is the EBITDA of Uber', tool_name='uber_10k')\n", (1265, 1330), False, 'from llama_index.legacy.question_gen.types import SubQuestion\n'), ((1336, 1425), 'llama_index.legacy.question_gen.types.SubQuestion', 'SubQuestion', ([], {'sub_question': '"""What is the revenue growth of Lyft"""', 'tool_name': '"""lyft_10k"""'}), "(sub_question='What is the revenue growth of Lyft', tool_name=\n 'lyft_10k')\n", (1347, 1425), False, 'from llama_index.legacy.question_gen.types import SubQuestion\n'), ((1440, 1516), 'llama_index.legacy.question_gen.types.SubQuestion', 'SubQuestion', ([], {'sub_question': '"""What is the EBITDA of Lyft"""', 'tool_name': '"""lyft_10k"""'}), "(sub_question='What is the EBITDA of Lyft', tool_name='lyft_10k')\n", (1451, 1516), False, 'from llama_index.legacy.question_gen.types import SubQuestion\n')] |
import os
import openai
from typing import Union
import collections
from IPython.display import Markdown, display
# access/create the .env file in the project dir for getting API keys. Create a .env file in the project/repository root,
# and add your own API key like "OPENAI_API_KEY = <your key>" without any quotes, after you pull this code in your IDE (VS Code devcontainer recommended).
# .env has already been added to git ignore so don't worry when pushing all files to remote.
from dotenv import load_dotenv
load_dotenv()
# import the required langchain and llama-index libraries.
# also the libraries for this querying pipeline.
from langchain import OpenAI
from langchain.agents import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent
from llama_index.langchain_helpers.agents import LlamaToolkit, create_llama_chat_agent, IndexToolConfig
from llama_index import (LLMPredictor, ServiceContext, SimpleDirectoryReader,
SQLDatabase, StorageContext, VectorStoreIndex,
set_global_service_context)
from llama_index.indices.postprocessor import SimilarityPostprocessor
from llama_index.indices.struct_store import SQLTableRetrieverQueryEngine
from llama_index.indices.struct_store.sql_query import NLSQLTableQueryEngine
from llama_index.logger import LlamaLogger
from llama_index.callbacks import CallbackManager, LlamaDebugHandler
from llama_index.objects import (ObjectIndex, SQLTableNodeMapping,
SQLTableSchema)
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.retrievers import VectorIndexRetriever
# DB Interface library
from sqlalchemy import (Column, Integer, MetaData, String, Table, column,
create_engine, select, inspect)
# import DB settings
from dbconnector import DBcomm
# Import Global runtime settings
from settings import runtime
##################################################################################################################################################################
# Logger object for logging the pipeline
llama_logger = LlamaLogger()
## OPEN AI API KEY
openai_key = os.getenv('OPENAI_API_KEY')
openai.api_key = openai_key
## MODE SELECTION AS PER SETTINGS.PY FILE
USE_PRECISION_PIPELINE = runtime["precision_mode"]
USE_LOCAL_EMBED_MODEL = runtime["local_embed"]
## OPEN AI CONFIGURATION or LLAMA CONFIGURATION AS PER MODE SELECTION
class LLMConf () :
def __init__(self) :
if USE_PRECISION_PIPELINE : # This is by-default TRUE while development phase
# gpt 3.5 and gpt 4 route
self.llm_fast = LLMPredictor(llm=ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo-16k"))
self.llm_deep = LLMPredictor(llm=ChatOpenAI(temperature=0.1, model_name="gpt-4"))
self.llm_super = LLMPredictor(llm=ChatOpenAI(temperature=0.2, model_name="gpt-4-32k"))
else :
# llama 2 route: install LlamaCPP to enable GPU efficient LLama-2 13B chat model to work acc to the production environment chosen.
# download guide: https://github.com/abetlen/llama-cpp-python#installation-with-openblas--cublas--clblast--metal
# implementation guide: https://gpt-index.readthedocs.io/en/latest/examples/llm/llama_2_llama_cpp.html
'''
from llama_index.llms import LlamaCPP
from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt
llm = LlamaCPP(
# You can pass in the URL to a GGML model to download it automatically
model_url="https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/resolve/main/llama-2-13b-chat.ggmlv3.q4_0.bin",
# optionally, you can set the path to a pre-downloaded model instead of model_url
model_path=None,
temperature=0.1,
max_new_tokens=256,
# llama2 has a context window of 4096 tokens, but we set it lower to allow for some wiggle room
context_window=3900,
# kwargs to pass to __call__()
generate_kwargs={},
# kwargs to pass to __init__()
# set to at least 1 to use GPU
model_kwargs={"n_gpu_layers": 1},
# transform inputs into Llama2 format
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
verbose=True,
)
'''
pass
## INSTANTIATE LLMs
llm_conf = LLMConf()
## LLAMA-INDEX CONFIGURATION
## Service context shared globally by the whole application
service_context = ServiceContext.from_defaults (llm=llm_conf.llm_deep if USE_PRECISION_PIPELINE else llm_conf.llm_fast,
#embed_model="local" if USE_LOCAL_EMBED_MODEL else None, # None for openai embeddings i.e. default for llamaindex
llama_logger=llama_logger)
set_global_service_context(service_context) # only for dev phase, later remove this line and use locally instantiated service_context directly based on the usecase
class Kwairy () :
def __init__(self) :
self.task_stack = collections.deque()
self.reflect_stack = collections.deque()
self.create_tableschema_index()
def set_task (self, task : Union[str, object]) :
self.task_stack.append(task)
def get_task (self) :
return self.task_stack.popleft()
def set_note(self, reflection : str) :
self.reflect_stack.append(reflection)
def create_tableschema_index (self) :
inspector = inspect(DBcomm.sql_engine)
self.sql_table_names = inspector.get_table_names()
self.indices_created = False
self.sqldb, self.schemaindex = None, None
#### SQL DB index
# load all table definitions as indexes for retrieval later
print("Loading table schema as object index")
metadata_obj = MetaData()
metadata_obj.reflect(DBcomm.sql_engine)
sql_database = SQLDatabase(DBcomm.sql_engine)
table_node_mapping = SQLTableNodeMapping(sql_database)
table_schema_objs = []
for table_name in metadata_obj.tables.keys():
table_schema_objs.append(SQLTableSchema(table_name=table_name))
# Dump the table schema information into a vector index. The vector index is stored within the context builder for future use.
tableschema_index = ObjectIndex.from_objects(
table_schema_objs,
table_node_mapping,
VectorStoreIndex,
)
self.sqldb, self.schemaindex = sql_database, tableschema_index
def sql_pipeline( self, question: Union[str, list[str]] , synthesize_response: bool = True ) :
db, ts_index = self.create_tableschema_index()
query_engine = SQLTableRetrieverQueryEngine(db, ts_index.as_retriever(similarity_top_k=1), service_context=service_context)
pass
def ingest(user_input : str) :
# given this user query, we need to find the intent and entities
# and then we need to find the relevant tables and columns
# and then we need to generate the SQL query
# and then we need to execute the SQL query
# and then we need to return the results
# and then we need to display the results
# and then we need to ask the user if they want to continue
# and then we need to ask the user if they want to ask another question
# and then we need to ask the user if they want to exit
# and then we need to exit
pass
def reply(pipeline_output : str) :
pass
| [
"llama_index.SQLDatabase",
"llama_index.ServiceContext.from_defaults",
"llama_index.objects.SQLTableSchema",
"llama_index.objects.ObjectIndex.from_objects",
"llama_index.set_global_service_context",
"llama_index.logger.LlamaLogger",
"llama_index.objects.SQLTableNodeMapping"
] | [((517, 530), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (528, 530), False, 'from dotenv import load_dotenv\n'), ((2230, 2243), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (2241, 2243), False, 'from llama_index.logger import LlamaLogger\n'), ((2277, 2304), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2286, 2304), False, 'import os\n'), ((4441, 4572), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': '(llm_conf.llm_deep if USE_PRECISION_PIPELINE else llm_conf.llm_fast)', 'llama_logger': 'llama_logger'}), '(llm=llm_conf.llm_deep if\n USE_PRECISION_PIPELINE else llm_conf.llm_fast, llama_logger=llama_logger)\n', (4469, 4572), False, 'from llama_index import LLMPredictor, ServiceContext, SimpleDirectoryReader, SQLDatabase, StorageContext, VectorStoreIndex, set_global_service_context\n'), ((4714, 4757), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (4740, 4757), False, 'from llama_index import LLMPredictor, ServiceContext, SimpleDirectoryReader, SQLDatabase, StorageContext, VectorStoreIndex, set_global_service_context\n'), ((4940, 4959), 'collections.deque', 'collections.deque', ([], {}), '()\n', (4957, 4959), False, 'import collections\n'), ((4983, 5002), 'collections.deque', 'collections.deque', ([], {}), '()\n', (5000, 5002), False, 'import collections\n'), ((5316, 5342), 'sqlalchemy.inspect', 'inspect', (['DBcomm.sql_engine'], {}), '(DBcomm.sql_engine)\n', (5323, 5342), False, 'from sqlalchemy import Column, Integer, MetaData, String, Table, column, create_engine, select, inspect\n'), ((5618, 5628), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (5626, 5628), False, 'from sqlalchemy import Column, Integer, MetaData, String, Table, column, create_engine, select, inspect\n'), ((5688, 5718), 'llama_index.SQLDatabase', 'SQLDatabase', (['DBcomm.sql_engine'], {}), '(DBcomm.sql_engine)\n', (5699, 5718), False, 'from llama_index import LLMPredictor, ServiceContext, SimpleDirectoryReader, SQLDatabase, StorageContext, VectorStoreIndex, set_global_service_context\n'), ((5742, 5775), 'llama_index.objects.SQLTableNodeMapping', 'SQLTableNodeMapping', (['sql_database'], {}), '(sql_database)\n', (5761, 5775), False, 'from llama_index.objects import ObjectIndex, SQLTableNodeMapping, SQLTableSchema\n'), ((6067, 6152), 'llama_index.objects.ObjectIndex.from_objects', 'ObjectIndex.from_objects', (['table_schema_objs', 'table_node_mapping', 'VectorStoreIndex'], {}), '(table_schema_objs, table_node_mapping,\n VectorStoreIndex)\n', (6091, 6152), False, 'from llama_index.objects import ObjectIndex, SQLTableNodeMapping, SQLTableSchema\n'), ((5877, 5914), 'llama_index.objects.SQLTableSchema', 'SQLTableSchema', ([], {'table_name': 'table_name'}), '(table_name=table_name)\n', (5891, 5914), False, 'from llama_index.objects import ObjectIndex, SQLTableNodeMapping, SQLTableSchema\n'), ((2731, 2790), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)', 'model_name': '"""gpt-3.5-turbo-16k"""'}), "(temperature=0.1, model_name='gpt-3.5-turbo-16k')\n", (2741, 2790), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2828, 2875), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)', 'model_name': '"""gpt-4"""'}), "(temperature=0.1, model_name='gpt-4')\n", (2838, 2875), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2914, 2965), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.2)', 'model_name': '"""gpt-4-32k"""'}), "(temperature=0.2, model_name='gpt-4-32k')\n", (2924, 2965), False, 'from langchain.chat_models import ChatOpenAI\n')] |
#
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import io
from typing import Dict, Any
import openai
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from langstream import Sink, Record
from llama_index import VectorStoreIndex, Document
from llama_index.vector_stores import CassandraVectorStore
class LlamaIndexCassandraSink(Sink):
def __init__(self):
self.config = None
self.session = None
self.index = None
def init(self, config: Dict[str, Any]):
self.config = config
openai.api_key = config["openaiKey"]
def start(self):
secure_bundle = self.config["cassandra"]["secureBundle"]
secure_bundle = secure_bundle.removeprefix("base64:")
secure_bundle = base64.b64decode(secure_bundle)
cluster = Cluster(
cloud={
"secure_connect_bundle": io.BytesIO(secure_bundle),
"use_default_tempdir": True,
},
auth_provider=PlainTextAuthProvider(
self.config["cassandra"]["username"],
self.config["cassandra"]["password"],
),
)
self.session = cluster.connect()
vector_store = CassandraVectorStore(
session=self.session,
keyspace=self.config["cassandra"]["keyspace"],
table=self.config["cassandra"]["table"],
embedding_dimension=1536,
insertion_batch_size=15,
)
self.index = VectorStoreIndex.from_vector_store(vector_store)
def write(self, record: Record):
self.index.insert(Document(text=record.value()))
def close(self):
if self.session:
self.session.shutdown()
| [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.vector_stores.CassandraVectorStore"
] | [((1311, 1342), 'base64.b64decode', 'base64.b64decode', (['secure_bundle'], {}), '(secure_bundle)\n', (1327, 1342), False, 'import base64\n'), ((1765, 1955), 'llama_index.vector_stores.CassandraVectorStore', 'CassandraVectorStore', ([], {'session': 'self.session', 'keyspace': "self.config['cassandra']['keyspace']", 'table': "self.config['cassandra']['table']", 'embedding_dimension': '(1536)', 'insertion_batch_size': '(15)'}), "(session=self.session, keyspace=self.config['cassandra'\n ]['keyspace'], table=self.config['cassandra']['table'],\n embedding_dimension=1536, insertion_batch_size=15)\n", (1785, 1955), False, 'from llama_index.vector_stores import CassandraVectorStore\n'), ((2040, 2088), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (2074, 2088), False, 'from llama_index import VectorStoreIndex, Document\n'), ((1544, 1646), 'cassandra.auth.PlainTextAuthProvider', 'PlainTextAuthProvider', (["self.config['cassandra']['username']", "self.config['cassandra']['password']"], {}), "(self.config['cassandra']['username'], self.config[\n 'cassandra']['password'])\n", (1565, 1646), False, 'from cassandra.auth import PlainTextAuthProvider\n'), ((1431, 1456), 'io.BytesIO', 'io.BytesIO', (['secure_bundle'], {}), '(secure_bundle)\n', (1441, 1456), False, 'import io\n')] |
import os
from django.conf import settings
from postdata.models import UploadedFile
from .create_node import *
import llama_index
from llama_index.llms import OpenAI
from llama_index import (VectorStoreIndex,
ServiceContext,
set_global_service_context,
)
llama_index.set_global_handler("simple")
# define LLM
llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0, max_tokens=4000, api_key=os.getenv("OPENAI_API_KEY"))
# configure service context
service_context = ServiceContext.from_defaults(llm=llm)
set_global_service_context(service_context)
class ContentAgent:
def __init__(self, user):
self.user = user
self.index = VectorStoreIndex([])
def generate_index(self):
uploads = UploadedFile.objects.filter(user_name=self.user)
url_list = set()
text_list = set()
for upload in uploads:
if upload.text:
text_list.add(upload.text)
if upload.url:
url_list.add(upload.url)
user_id = self.user.id
files_dir = os.path.join(settings.MEDIA_ROOT, f"user_{user_id}", 'original_files')
print(f'text_list: {" ".join(text_list)}')
print(f'url_list: {" ".join(url_list)}')
print(f'files_dir: {files_dir}')
if url_list:
node = create_node_url(url_list)
self.index.insert_nodes(node)
if text_list:
node = create_node_text(text_list)
self.index.insert_nodes(node)
if os.listdir(files_dir):
node = create_node_dir(files_dir)
self.index.insert_nodes(node)
def generate_prompt(self, prompt_details):
prompt = '请根据以下描述,使用中文,撰写一篇文章'
if 'topic' in prompt_details and prompt_details['topic']:
prompt += f",关于{prompt_details['topic']}"
if 'outline' in prompt_details and prompt_details['outline']:
prompt += ",文章应包含以下几个部分: "
for idx, point in enumerate(prompt_details['outline'], start=1):
prompt += f"{idx}. {point};"
if 'primaryKeyword' in prompt_details and prompt_details['primaryKeyword']:
prompt += f"请确保文章内容围绕{prompt_details['primaryKeyword']}这一主题"
if 'secondaryKeywords' in prompt_details and prompt_details['secondaryKeywords']:
prompt += f",同时涉及{prompt_details['secondaryKeywords']}这些关键词。"
else:
prompt += "。"
if 'view' in prompt_details and prompt_details['view']:
prompt += f"文章应该采用{prompt_details['view']}的人称。"
if 'tone' in prompt_details and prompt_details['tone']:
prompt += f"文章应该采用{prompt_details['tone']}的语气。"
prompt += "在文章中嵌入相关的事实材料以支持论述。最后,请使用Markdown格式进行排版,确保文章结构清晰。"
return prompt
def write(self, description):
prompt = self.generate_prompt(description)
self.generate_index()
query_engine = self.index.as_chat_engine()
response = query_engine.chat(prompt)
return response.response
| [
"llama_index.set_global_service_context",
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.set_global_handler"
] | [((334, 374), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (364, 374), False, 'import llama_index\n'), ((544, 581), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (572, 581), False, 'from llama_index import VectorStoreIndex, ServiceContext, set_global_service_context\n'), ((582, 625), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (608, 625), False, 'from llama_index import VectorStoreIndex, ServiceContext, set_global_service_context\n'), ((469, 496), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (478, 496), False, 'import os\n'), ((723, 743), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['[]'], {}), '([])\n', (739, 743), False, 'from llama_index import VectorStoreIndex, ServiceContext, set_global_service_context\n'), ((797, 845), 'postdata.models.UploadedFile.objects.filter', 'UploadedFile.objects.filter', ([], {'user_name': 'self.user'}), '(user_name=self.user)\n', (824, 845), False, 'from postdata.models import UploadedFile\n'), ((1118, 1188), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', 'f"""user_{user_id}"""', '"""original_files"""'], {}), "(settings.MEDIA_ROOT, f'user_{user_id}', 'original_files')\n", (1130, 1188), False, 'import os\n'), ((1565, 1586), 'os.listdir', 'os.listdir', (['files_dir'], {}), '(files_dir)\n', (1575, 1586), False, 'import os\n')] |
"""Base retrieval abstractions."""
import asyncio
from abc import abstractmethod
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.evaluation.retrieval.metrics import resolve_metrics
from llama_index.core.evaluation.retrieval.metrics_base import (
BaseRetrievalMetric,
RetrievalMetricResult,
)
from llama_index.core.llama_dataset.legacy.embedding import (
EmbeddingQAFinetuneDataset,
)
class RetrievalEvalMode(str, Enum):
"""Evaluation of retrieval modality."""
TEXT = "text"
IMAGE = "image"
@classmethod
def from_str(cls, label: str) -> "RetrievalEvalMode":
if label == "text":
return RetrievalEvalMode.TEXT
elif label == "image":
return RetrievalEvalMode.IMAGE
else:
raise NotImplementedError
class RetrievalEvalResult(BaseModel):
"""Retrieval eval result.
NOTE: this abstraction might change in the future.
Attributes:
query (str): Query string
expected_ids (List[str]): Expected ids
retrieved_ids (List[str]): Retrieved ids
metric_dict (Dict[str, BaseRetrievalMetric]): \
Metric dictionary for the evaluation
"""
class Config:
arbitrary_types_allowed = True
query: str = Field(..., description="Query string")
expected_ids: List[str] = Field(..., description="Expected ids")
expected_texts: Optional[List[str]] = Field(
default=None,
description="Expected texts associated with nodes provided in `expected_ids`",
)
retrieved_ids: List[str] = Field(..., description="Retrieved ids")
retrieved_texts: List[str] = Field(..., description="Retrieved texts")
mode: "RetrievalEvalMode" = Field(
default=RetrievalEvalMode.TEXT, description="text or image"
)
metric_dict: Dict[str, RetrievalMetricResult] = Field(
..., description="Metric dictionary for the evaluation"
)
@property
def metric_vals_dict(self) -> Dict[str, float]:
"""Dictionary of metric values."""
return {k: v.score for k, v in self.metric_dict.items()}
def __str__(self) -> str:
"""String representation."""
return f"Query: {self.query}\n" f"Metrics: {self.metric_vals_dict!s}\n"
class BaseRetrievalEvaluator(BaseModel):
"""Base Retrieval Evaluator class."""
metrics: List[BaseRetrievalMetric] = Field(
..., description="List of metrics to evaluate"
)
class Config:
arbitrary_types_allowed = True
@classmethod
def from_metric_names(
cls, metric_names: List[str], **kwargs: Any
) -> "BaseRetrievalEvaluator":
"""Create evaluator from metric names.
Args:
metric_names (List[str]): List of metric names
**kwargs: Additional arguments for the evaluator
"""
metric_types = resolve_metrics(metric_names)
return cls(metrics=[metric() for metric in metric_types], **kwargs)
@abstractmethod
async def _aget_retrieved_ids_and_texts(
self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT
) -> Tuple[List[str], List[str]]:
"""Get retrieved ids and texts."""
raise NotImplementedError
def evaluate(
self,
query: str,
expected_ids: List[str],
expected_texts: Optional[List[str]] = None,
mode: RetrievalEvalMode = RetrievalEvalMode.TEXT,
**kwargs: Any,
) -> RetrievalEvalResult:
"""Run evaluation results with query string and expected ids.
Args:
query (str): Query string
expected_ids (List[str]): Expected ids
Returns:
RetrievalEvalResult: Evaluation result
"""
return asyncio.run(
self.aevaluate(
query=query,
expected_ids=expected_ids,
expected_texts=expected_texts,
mode=mode,
**kwargs,
)
)
# @abstractmethod
async def aevaluate(
self,
query: str,
expected_ids: List[str],
expected_texts: Optional[List[str]] = None,
mode: RetrievalEvalMode = RetrievalEvalMode.TEXT,
**kwargs: Any,
) -> RetrievalEvalResult:
"""Run evaluation with query string, retrieved contexts,
and generated response string.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
retrieved_ids, retrieved_texts = await self._aget_retrieved_ids_and_texts(
query, mode
)
metric_dict = {}
for metric in self.metrics:
eval_result = metric.compute(
query, expected_ids, retrieved_ids, expected_texts, retrieved_texts
)
metric_dict[metric.metric_name] = eval_result
return RetrievalEvalResult(
query=query,
expected_ids=expected_ids,
expected_texts=expected_texts,
retrieved_ids=retrieved_ids,
retrieved_texts=retrieved_texts,
mode=mode,
metric_dict=metric_dict,
)
async def aevaluate_dataset(
self,
dataset: EmbeddingQAFinetuneDataset,
workers: int = 2,
show_progress: bool = False,
**kwargs: Any,
) -> List[RetrievalEvalResult]:
"""Run evaluation with dataset."""
semaphore = asyncio.Semaphore(workers)
async def eval_worker(
query: str, expected_ids: List[str], mode: RetrievalEvalMode
) -> RetrievalEvalResult:
async with semaphore:
return await self.aevaluate(query, expected_ids=expected_ids, mode=mode)
response_jobs = []
mode = RetrievalEvalMode.from_str(dataset.mode)
for query_id, query in dataset.queries.items():
expected_ids = dataset.relevant_docs[query_id]
response_jobs.append(eval_worker(query, expected_ids, mode))
if show_progress:
from tqdm.asyncio import tqdm_asyncio
eval_results = await tqdm_asyncio.gather(*response_jobs)
else:
eval_results = await asyncio.gather(*response_jobs)
return eval_results
| [
"llama_index.core.evaluation.retrieval.metrics.resolve_metrics",
"llama_index.core.bridge.pydantic.Field"
] | [((1364, 1402), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Query string"""'}), "(..., description='Query string')\n", (1369, 1402), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1433, 1471), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Expected ids"""'}), "(..., description='Expected ids')\n", (1438, 1471), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1514, 1617), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Expected texts associated with nodes provided in `expected_ids`"""'}), "(default=None, description=\n 'Expected texts associated with nodes provided in `expected_ids`')\n", (1519, 1617), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1667, 1706), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retrieved ids"""'}), "(..., description='Retrieved ids')\n", (1672, 1706), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1740, 1781), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retrieved texts"""'}), "(..., description='Retrieved texts')\n", (1745, 1781), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1814, 1880), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'RetrievalEvalMode.TEXT', 'description': '"""text or image"""'}), "(default=RetrievalEvalMode.TEXT, description='text or image')\n", (1819, 1880), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1947, 2009), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Metric dictionary for the evaluation"""'}), "(..., description='Metric dictionary for the evaluation')\n", (1952, 2009), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2474, 2527), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""List of metrics to evaluate"""'}), "(..., description='List of metrics to evaluate')\n", (2479, 2527), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2950, 2979), 'llama_index.core.evaluation.retrieval.metrics.resolve_metrics', 'resolve_metrics', (['metric_names'], {}), '(metric_names)\n', (2965, 2979), False, 'from llama_index.core.evaluation.retrieval.metrics import resolve_metrics\n'), ((5539, 5565), 'asyncio.Semaphore', 'asyncio.Semaphore', (['workers'], {}), '(workers)\n', (5556, 5565), False, 'import asyncio\n'), ((6210, 6245), 'tqdm.asyncio.tqdm_asyncio.gather', 'tqdm_asyncio.gather', (['*response_jobs'], {}), '(*response_jobs)\n', (6229, 6245), False, 'from tqdm.asyncio import tqdm_asyncio\n'), ((6293, 6323), 'asyncio.gather', 'asyncio.gather', (['*response_jobs'], {}), '(*response_jobs)\n', (6307, 6323), False, 'import asyncio\n')] |
"""Code splitter."""
from typing import Any, Callable, List, Optional
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks.base import CallbackManager
from llama_index.legacy.callbacks.schema import CBEventType, EventPayload
from llama_index.legacy.node_parser.interface import TextSplitter
from llama_index.legacy.node_parser.node_utils import default_id_func
from llama_index.legacy.schema import Document
DEFAULT_CHUNK_LINES = 40
DEFAULT_LINES_OVERLAP = 15
DEFAULT_MAX_CHARS = 1500
class CodeSplitter(TextSplitter):
"""Split code using a AST parser.
Thank you to Kevin Lu / SweepAI for suggesting this elegant code splitting solution.
https://docs.sweep.dev/blogs/chunking-2m-files
"""
language: str = Field(
description="The programming language of the code being split."
)
chunk_lines: int = Field(
default=DEFAULT_CHUNK_LINES,
description="The number of lines to include in each chunk.",
gt=0,
)
chunk_lines_overlap: int = Field(
default=DEFAULT_LINES_OVERLAP,
description="How many lines of code each chunk overlaps with.",
gt=0,
)
max_chars: int = Field(
default=DEFAULT_MAX_CHARS,
description="Maximum number of characters per chunk.",
gt=0,
)
_parser: Any = PrivateAttr()
def __init__(
self,
language: str,
chunk_lines: int = DEFAULT_CHUNK_LINES,
chunk_lines_overlap: int = DEFAULT_LINES_OVERLAP,
max_chars: int = DEFAULT_MAX_CHARS,
parser: Any = None,
callback_manager: Optional[CallbackManager] = None,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
id_func: Optional[Callable[[int, Document], str]] = None,
) -> None:
"""Initialize a CodeSplitter."""
from tree_sitter import Parser
if parser is None:
try:
import tree_sitter_languages
parser = tree_sitter_languages.get_parser(language)
except ImportError:
raise ImportError(
"Please install tree_sitter_languages to use CodeSplitter."
"Or pass in a parser object."
)
except Exception:
print(
f"Could not get parser for language {language}. Check "
"https://github.com/grantjenks/py-tree-sitter-languages#license "
"for a list of valid languages."
)
raise
if not isinstance(parser, Parser):
raise ValueError("Parser must be a tree-sitter Parser object.")
self._parser = parser
callback_manager = callback_manager or CallbackManager([])
id_func = id_func or default_id_func
super().__init__(
language=language,
chunk_lines=chunk_lines,
chunk_lines_overlap=chunk_lines_overlap,
max_chars=max_chars,
callback_manager=callback_manager,
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
id_func=id_func,
)
@classmethod
def from_defaults(
cls,
language: str,
chunk_lines: int = DEFAULT_CHUNK_LINES,
chunk_lines_overlap: int = DEFAULT_LINES_OVERLAP,
max_chars: int = DEFAULT_MAX_CHARS,
callback_manager: Optional[CallbackManager] = None,
parser: Any = None,
) -> "CodeSplitter":
"""Create a CodeSplitter with default values."""
return cls(
language=language,
chunk_lines=chunk_lines,
chunk_lines_overlap=chunk_lines_overlap,
max_chars=max_chars,
parser=parser,
)
@classmethod
def class_name(cls) -> str:
return "CodeSplitter"
def _chunk_node(self, node: Any, text: str, last_end: int = 0) -> List[str]:
new_chunks = []
current_chunk = ""
for child in node.children:
if child.end_byte - child.start_byte > self.max_chars:
# Child is too big, recursively chunk the child
if len(current_chunk) > 0:
new_chunks.append(current_chunk)
current_chunk = ""
new_chunks.extend(self._chunk_node(child, text, last_end))
elif (
len(current_chunk) + child.end_byte - child.start_byte > self.max_chars
):
# Child would make the current chunk too big, so start a new chunk
new_chunks.append(current_chunk)
current_chunk = text[last_end : child.end_byte]
else:
current_chunk += text[last_end : child.end_byte]
last_end = child.end_byte
if len(current_chunk) > 0:
new_chunks.append(current_chunk)
return new_chunks
def split_text(self, text: str) -> List[str]:
"""Split incoming code and return chunks using the AST."""
with self.callback_manager.event(
CBEventType.CHUNKING, payload={EventPayload.CHUNKS: [text]}
) as event:
tree = self._parser.parse(bytes(text, "utf-8"))
if (
not tree.root_node.children
or tree.root_node.children[0].type != "ERROR"
):
chunks = [
chunk.strip() for chunk in self._chunk_node(tree.root_node, text)
]
event.on_end(
payload={EventPayload.CHUNKS: chunks},
)
return chunks
else:
raise ValueError(f"Could not parse code with language {self.language}.")
# TODO: set up auto-language detection using something like https://github.com/yoeo/guesslang.
| [
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.callbacks.base.CallbackManager"
] | [((779, 849), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The programming language of the code being split."""'}), "(description='The programming language of the code being split.')\n", (784, 849), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((887, 993), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_CHUNK_LINES', 'description': '"""The number of lines to include in each chunk."""', 'gt': '(0)'}), "(default=DEFAULT_CHUNK_LINES, description=\n 'The number of lines to include in each chunk.', gt=0)\n", (892, 993), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1051, 1162), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_LINES_OVERLAP', 'description': '"""How many lines of code each chunk overlaps with."""', 'gt': '(0)'}), "(default=DEFAULT_LINES_OVERLAP, description=\n 'How many lines of code each chunk overlaps with.', gt=0)\n", (1056, 1162), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1210, 1308), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_MAX_CHARS', 'description': '"""Maximum number of characters per chunk."""', 'gt': '(0)'}), "(default=DEFAULT_MAX_CHARS, description=\n 'Maximum number of characters per chunk.', gt=0)\n", (1215, 1308), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1354, 1367), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1365, 1367), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2786, 2805), 'llama_index.legacy.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2801, 2805), False, 'from llama_index.legacy.callbacks.base import CallbackManager\n'), ((2022, 2064), 'tree_sitter_languages.get_parser', 'tree_sitter_languages.get_parser', (['language'], {}), '(language)\n', (2054, 2064), False, 'import tree_sitter_languages\n')] |
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex
from llama_index.llms import OpenAI
async def main():
# DOWNLOAD LLAMADATASET
rag_dataset, documents = download_llama_dataset(
"DocugamiKgRagSec10Q", "./docugami_kg_rag_sec_10_q"
)
# BUILD BASIC RAG PIPELINE
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
# EVALUATE WITH PACK
RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack_stuff")
judge_llm = OpenAI(model="gpt-3.5-turbo")
rag_evaluator = RagEvaluatorPack(
query_engine=query_engine, rag_dataset=rag_dataset, judge_llm=judge_llm
)
############################################################################
# NOTE: If have a lower tier subscription for OpenAI API like Usage Tier 1 #
# then you'll need to use different batch_size and sleep_time_in_seconds. #
# For Usage Tier 1, settings that seemed to work well were batch_size=5, #
# and sleep_time_in_seconds=15 (as of December 2023.) #
############################################################################
benchmark_df = await rag_evaluator.arun(
batch_size=20, # batches the number of openai api calls to make
sleep_time_in_seconds=1, # number of seconds sleep before making an api call
)
print(benchmark_df)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main)
| [
"llama_index.core.llama_dataset.download_llama_dataset",
"llama_index.core.llama_pack.download_llama_pack",
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.llms.OpenAI"
] | [((301, 376), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""DocugamiKgRagSec10Q"""', '"""./docugami_kg_rag_sec_10_q"""'], {}), "('DocugamiKgRagSec10Q', './docugami_kg_rag_sec_10_q')\n", (323, 376), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((435, 487), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (466, 487), False, 'from llama_index.core import VectorStoreIndex\n'), ((580, 635), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack_stuff"""'], {}), "('RagEvaluatorPack', './pack_stuff')\n", (599, 635), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((652, 681), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (658, 681), False, 'from llama_index.llms import OpenAI\n'), ((1567, 1591), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1589, 1591), False, 'import asyncio\n')] |
import os
import torch
import json
import argparse
from datasets import load_dataset
from llama_index import GPTVectorStoreIndex, Document, ServiceContext
from llama_index.indices.prompt_helper import PromptHelper
from transformers import AutoTokenizer
import openai
import tiktoken
#import GPUtil
stopped_num = 10000000
delay = 10
# Gpus = GPUtil.getGPUs()
def get_gpu_info():
gpulist = []
GPUtil.showUtilization()
for gpu in Gpus:
print('gpu.id:', gpu.id)
print('total GPU:', gpu.memoryTotal)
print('GPU usage:', gpu.memoryUsed)
print('gpu usage percent:', gpu.memoryUtil * 100)
gpulist.append([ gpu.id, gpu.memoryTotal, gpu.memoryUsed,gpu.memoryUtil * 100])
return gpulist
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, default="llama-index", help="raw model name for evaluation")
parser.add_argument('--task', type=str, default=None, help="long context understanding tasks in LooGLE", choices=["shortdep_qa","longdep_qa","longdep_summarization","shortdep_cloze"])
parser.add_argument('--max_length', type=int, default=None, help="the max length of input prompt")
parser.add_argument('--model_path', type=str, default="./Models/")
parser.add_argument('--output_path', type=str, default="./Output/")
return parser.parse_args(args)
def num_tokens_from_string(string: str, encoding_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
def get_pred(data_instance, tokenizer, max_length, max_gen, prompt_format):
ans, groundtruth = [], []
preds = {}
raw_inputs = data_instance['input']
documents = [Document(text=raw_inputs)]
prompt_helper = PromptHelper(
context_window=max_length + 1000,
num_output=max_gen,
chunk_size_limit=1024,
chunk_overlap_ratio=0.1,
)
service_context = ServiceContext.from_defaults(
context_window=max_length + 1000,
num_output=max_gen,
prompt_helper=prompt_helper,
chunk_size_limit=1024,
)
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
query_engine = index.as_query_engine()
if data_instance['qa_pairs'] == 'none':
preds['qa_pairs'] = data_instance['qa_pairs']
json_obj = {'input': raw_inputs}
prompt = prompt_format.format(**json_obj)
tokenized_prompt = tokenizer.encode(prompt)
if len(tokenized_prompt) > max_length:
half = int(max_length/2)
prompt = tokenizer.decode(tokenized_prompt[:half])+tokenizer.decode(tokenized_prompt[-half:])
rsp = query_engine.query(prompt).response
ans.append(rsp)
groundtruth.append(data_instance["output"])
else:
preds['qa_pairs'] = eval(data_instance['qa_pairs'])
for j in eval(data_instance['qa_pairs']):
json_obj = {'Q':j['Q'], 'input': raw_inputs}
prompt = prompt_format.format(**json_obj)
tokenized_prompt = tokenizer.encode(prompt)
if len(tokenized_prompt) > max_length:
half = int(max_length/2)
prompt = tokenizer.decode(tokenized_prompt[:half])+tokenizer.decode(tokenized_prompt[-half:])
rsp = query_engine.query(prompt).response
ans.append(rsp)
groundtruth.append(j['A'])
preds['llm_output'] = ans
preds['output'] = groundtruth
return preds
def loads(path, task):
data = []
with open(path+task+".jsonl", "r") as f:
lines = f.readlines()
for line in lines:
data.append(json.loads(line))
return data
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
args = parse_args()
# data = load_dataset('bigainlco/LooGLE', args.task, split="test")
data = loads("LooGLE-testdata/", args.task)
tokenizer = tiktoken.get_encoding("cl100k_base")
task2prompt = json.load(open("./config/task2prompt.json", "r"))
task2maxlen = json.load(open("./config/task2maxlen.json", "r"))
prompt_format = task2prompt[args.task]
max_gen = task2maxlen[args.task]
for i in data:
predictions = get_pred(i, tokenizer, args.max_length, max_gen, prompt_format)
with open(args.output_path + args.task + '_' + args.model_name + ".jsonl", "a+") as g:
g.write(json.dumps(predictions)+'\n')
| [
"llama_index.indices.prompt_helper.PromptHelper",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.Document"
] | [((783, 808), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (806, 808), False, 'import argparse\n'), ((1533, 1569), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (1554, 1569), False, 'import tiktoken\n'), ((1870, 1988), 'llama_index.indices.prompt_helper.PromptHelper', 'PromptHelper', ([], {'context_window': '(max_length + 1000)', 'num_output': 'max_gen', 'chunk_size_limit': '(1024)', 'chunk_overlap_ratio': '(0.1)'}), '(context_window=max_length + 1000, num_output=max_gen,\n chunk_size_limit=1024, chunk_overlap_ratio=0.1)\n', (1882, 1988), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((2047, 2186), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'context_window': '(max_length + 1000)', 'num_output': 'max_gen', 'prompt_helper': 'prompt_helper', 'chunk_size_limit': '(1024)'}), '(context_window=max_length + 1000, num_output=\n max_gen, prompt_helper=prompt_helper, chunk_size_limit=1024)\n', (2075, 2186), False, 'from llama_index import GPTVectorStoreIndex, Document, ServiceContext\n'), ((2233, 2311), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (2267, 2311), False, 'from llama_index import GPTVectorStoreIndex, Document, ServiceContext\n'), ((4121, 4157), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['"""cl100k_base"""'], {}), "('cl100k_base')\n", (4142, 4157), False, 'import tiktoken\n'), ((1823, 1848), 'llama_index.Document', 'Document', ([], {'text': 'raw_inputs'}), '(text=raw_inputs)\n', (1831, 1848), False, 'from llama_index import GPTVectorStoreIndex, Document, ServiceContext\n'), ((3923, 3948), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3946, 3948), False, 'import torch\n'), ((3824, 3840), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (3834, 3840), False, 'import json\n'), ((4601, 4624), 'json.dumps', 'json.dumps', (['predictions'], {}), '(predictions)\n', (4611, 4624), False, 'import json\n')] |
# inspired by: https://github.com/rushic24/langchain-remember-me-llm/
# MIT license
import torch
from json_database import JsonStorageXDG
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.llms.base import LLM
from llama_index import Document
from llama_index import LLMPredictor, ServiceContext
from llama_index import LangchainEmbedding, GPTVectorStoreIndex as GPTSimpleVectorIndex
from ovos_plugin_manager.templates.solvers import QuestionSolver
from transformers import pipeline
class UserInfo:
db = JsonStorageXDG("personalLLM")
db.setdefault("data", [])
@classmethod
def remember(cls, fact):
cls.db["data"].append(fact)
cls.db.store()
class PersonalLLMSolver(QuestionSolver):
enable_tx = True
priority = 80
def __init__(self, config=None):
config = config or {}
config["lang"] = "en" # only english supported (not really, depends on model... TODO)
super().__init__(config)
# a class inside a class :O
class PersonalUserLLM(LLM):
model_name = config.get("model") or "google/flan-t5-small"
pipeline = pipeline("text2text-generation", model=model_name, device=0,
model_kwargs={"torch_dtype": torch.bfloat16})
initial_prompt = config.get("initial_prompt") or \
'You are a highly intelligent question answering A.I. based on the information provided by the user. ' \
'If the answer cannot be found in the user provided information, write "I could not find an answer."'
@classmethod
def get_engine(cls):
llm_predictor = LLMPredictor(llm=cls())
hfemb = HuggingFaceEmbeddings()
embed_model = LangchainEmbedding(hfemb)
documents = [Document(t) for t in UserInfo.db["data"]]
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, embed_model=embed_model)
index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
return index.as_query_engine()
def _call(self, prompt, stop=None):
text = f"{self.initial_prompt}\n\n{prompt} {stop}" if stop is not None else f"{self.initial_prompt}\n\n{prompt}"
return self.pipeline(text, max_length=9999)[0]["generated_text"]
@property
def _identifying_params(self):
return {"name_of_model": self.model_name}
@property
def _llm_type(self):
return "custom"
self.llm = PersonalUserLLM.get_engine()
# officially exported Solver methods
def get_spoken_answer(self, query, context=None):
return self.llm.query(query).response
| [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.Document",
"llama_index.LangchainEmbedding"
] | [((541, 570), 'json_database.JsonStorageXDG', 'JsonStorageXDG', (['"""personalLLM"""'], {}), "('personalLLM')\n", (555, 570), False, 'from json_database import JsonStorageXDG\n'), ((1152, 1263), 'transformers.pipeline', 'pipeline', (['"""text2text-generation"""'], {'model': 'model_name', 'device': '(0)', 'model_kwargs': "{'torch_dtype': torch.bfloat16}"}), "('text2text-generation', model=model_name, device=0, model_kwargs={\n 'torch_dtype': torch.bfloat16})\n", (1160, 1263), False, 'from transformers import pipeline\n'), ((1758, 1781), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {}), '()\n', (1779, 1781), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((1812, 1837), 'llama_index.LangchainEmbedding', 'LangchainEmbedding', (['hfemb'], {}), '(hfemb)\n', (1830, 1837), False, 'from llama_index import LangchainEmbedding, GPTVectorStoreIndex as GPTSimpleVectorIndex\n'), ((1943, 2030), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model'}), '(llm_predictor=llm_predictor, embed_model=\n embed_model)\n', (1971, 2030), False, 'from llama_index import LLMPredictor, ServiceContext\n'), ((2050, 2129), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTSimpleVectorIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (2085, 2129), True, 'from llama_index import LangchainEmbedding, GPTVectorStoreIndex as GPTSimpleVectorIndex\n'), ((1867, 1878), 'llama_index.Document', 'Document', (['t'], {}), '(t)\n', (1875, 1878), False, 'from llama_index import Document\n')] |
from dotenv import load_dotenv
import os.path
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage,
)
import logging
import sys
load_dotenv()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# check if storage already exists
PERSIST_DIR = "./storage"
if not os.path.exists(PERSIST_DIR):
# load the documents and create the index
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
# store it for later
index.storage_context.persist(persist_dir=PERSIST_DIR)
else:
# load the existing index
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
index = load_index_from_storage(storage_context)
# Either way we can now query the index
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response)
# retrieve the top 10 most similar documents
query_engine = index.as_query_engine(similarity_top=10)
response = query_engine.query("What did the author do growing up?")
print(response)
| [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader"
] | [((204, 217), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (215, 217), False, 'from dotenv import load_dotenv\n'), ((219, 277), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (238, 277), False, 'import logging\n'), ((309, 349), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (330, 349), False, 'import logging\n'), ((564, 606), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (595, 606), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((749, 802), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'PERSIST_DIR'}), '(persist_dir=PERSIST_DIR)\n', (777, 802), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((815, 855), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (838, 855), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((278, 297), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (295, 297), False, 'import logging\n'), ((510, 539), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (531, 539), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n')] |
"""Table node mapping."""
from typing import Any, Dict, Optional, Sequence
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.objects.base_node_mapping import (
DEFAULT_PERSIST_DIR,
DEFAULT_PERSIST_FNAME,
BaseObjectNodeMapping,
)
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.utilities.sql_wrapper import SQLDatabase
class SQLTableSchema(BaseModel):
"""Lightweight representation of a SQL table."""
table_name: str
context_str: Optional[str] = None
class SQLTableNodeMapping(BaseObjectNodeMapping[SQLTableSchema]):
"""SQL Table node mapping."""
def __init__(self, sql_database: SQLDatabase) -> None:
self._sql_database = sql_database
@classmethod
def from_objects(
cls,
objs: Sequence[SQLTableSchema],
*args: Any,
sql_database: Optional[SQLDatabase] = None,
**kwargs: Any,
) -> "BaseObjectNodeMapping":
"""Initialize node mapping."""
if sql_database is None:
raise ValueError("Must provide sql_database")
# ignore objs, since we are building from sql_database
return cls(sql_database)
def _add_object(self, obj: SQLTableSchema) -> None:
raise NotImplementedError
def to_node(self, obj: SQLTableSchema) -> TextNode:
"""To node."""
# taken from existing schema logic
table_text = (
f"Schema of table {obj.table_name}:\n"
f"{self._sql_database.get_single_table_info(obj.table_name)}\n"
)
metadata = {"name": obj.table_name}
if obj.context_str is not None:
table_text += f"Context of table {obj.table_name}:\n"
table_text += obj.context_str
metadata["context"] = obj.context_str
return TextNode(
text=table_text,
metadata=metadata,
excluded_embed_metadata_keys=["name", "context"],
excluded_llm_metadata_keys=["name", "context"],
)
def _from_node(self, node: BaseNode) -> SQLTableSchema:
"""From node."""
if node.metadata is None:
raise ValueError("Metadata must be set")
return SQLTableSchema(
table_name=node.metadata["name"], context_str=node.metadata.get("context")
)
@property
def obj_node_mapping(self) -> Dict[int, Any]:
"""The mapping data structure between node and object."""
raise NotImplementedError("Subclasses should implement this!")
def persist(
self, persist_dir: str = ..., obj_node_mapping_fname: str = ...
) -> None:
"""Persist objs."""
raise NotImplementedError("Subclasses should implement this!")
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME,
) -> "SQLTableNodeMapping":
raise NotImplementedError(
"This object node mapping does not support persist method."
)
| [
"llama_index.core.schema.TextNode"
] | [((1821, 1968), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'table_text', 'metadata': 'metadata', 'excluded_embed_metadata_keys': "['name', 'context']", 'excluded_llm_metadata_keys': "['name', 'context']"}), "(text=table_text, metadata=metadata, excluded_embed_metadata_keys=[\n 'name', 'context'], excluded_llm_metadata_keys=['name', 'context'])\n", (1829, 1968), False, 'from llama_index.core.schema import BaseNode, TextNode\n')] |
import logging
from typing import Any, Dict, Generator, List, Optional, Tuple, Type, Union, cast
from llama_index.legacy.agent.openai.utils import resolve_tool_choice
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.llms.openai import OpenAI
from llama_index.legacy.llms.openai_utils import OpenAIToolCall, to_openai_tool
from llama_index.legacy.program.llm_prompt_program import BaseLLMFunctionProgram
from llama_index.legacy.program.utils import create_list_model
from llama_index.legacy.prompts.base import BasePromptTemplate, PromptTemplate
from llama_index.legacy.types import Model
_logger = logging.getLogger(__name__)
def _default_tool_choice(
output_cls: Type[Model], allow_multiple: bool = False
) -> Union[str, Dict[str, Any]]:
"""Default OpenAI tool to choose."""
if allow_multiple:
return "auto"
else:
schema = output_cls.schema()
return resolve_tool_choice(schema["title"])
def _get_json_str(raw_str: str, start_idx: int) -> Tuple[Optional[str], int]:
"""Extract JSON str from raw string and start index."""
raw_str = raw_str[start_idx:]
stack_count = 0
for i, c in enumerate(raw_str):
if c == "{":
stack_count += 1
if c == "}":
stack_count -= 1
if stack_count == 0:
return raw_str[: i + 1], i + 2 + start_idx
return None, start_idx
def _parse_tool_calls(
tool_calls: List[OpenAIToolCall],
output_cls: Type[Model],
allow_multiple: bool = False,
verbose: bool = False,
) -> Union[Model, List[Model]]:
outputs = []
for tool_call in tool_calls:
function_call = tool_call.function
# validations to get passed mypy
assert function_call is not None
assert function_call.name is not None
assert function_call.arguments is not None
if verbose:
name = function_call.name
arguments_str = function_call.arguments
print(f"Function call: {name} with args: {arguments_str}")
if isinstance(function_call.arguments, dict):
output = output_cls.parse_obj(function_call.arguments)
else:
output = output_cls.parse_raw(function_call.arguments)
outputs.append(output)
if allow_multiple:
return outputs
else:
if len(outputs) > 1:
_logger.warning(
"Multiple outputs found, returning first one. "
"If you want to return all outputs, set output_multiple=True."
)
return outputs[0]
class OpenAIPydanticProgram(BaseLLMFunctionProgram[LLM]):
"""
An OpenAI-based function that returns a pydantic model.
Note: this interface is not yet stable.
"""
def __init__(
self,
output_cls: Type[Model],
llm: LLM,
prompt: BasePromptTemplate,
tool_choice: Union[str, Dict[str, Any]],
allow_multiple: bool = False,
verbose: bool = False,
) -> None:
"""Init params."""
self._output_cls = output_cls
self._llm = llm
self._prompt = prompt
self._verbose = verbose
self._allow_multiple = allow_multiple
self._tool_choice = tool_choice
@classmethod
def from_defaults(
cls,
output_cls: Type[Model],
prompt_template_str: Optional[str] = None,
prompt: Optional[PromptTemplate] = None,
llm: Optional[LLM] = None,
verbose: bool = False,
allow_multiple: bool = False,
tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
**kwargs: Any,
) -> "OpenAIPydanticProgram":
llm = llm or OpenAI(model="gpt-3.5-turbo-0613")
if not isinstance(llm, OpenAI):
raise ValueError(
"OpenAIPydanticProgram only supports OpenAI LLMs. " f"Got: {type(llm)}"
)
if not llm.metadata.is_function_calling_model:
raise ValueError(
f"Model name {llm.metadata.model_name} does not support "
"function calling API. "
)
if prompt is None and prompt_template_str is None:
raise ValueError("Must provide either prompt or prompt_template_str.")
if prompt is not None and prompt_template_str is not None:
raise ValueError("Must provide either prompt or prompt_template_str.")
if prompt_template_str is not None:
prompt = PromptTemplate(prompt_template_str)
tool_choice = tool_choice or _default_tool_choice(output_cls, allow_multiple)
return cls(
output_cls=output_cls,
llm=llm,
prompt=cast(PromptTemplate, prompt),
tool_choice=tool_choice,
allow_multiple=allow_multiple,
verbose=verbose,
)
@property
def output_cls(self) -> Type[Model]:
return self._output_cls
@property
def prompt(self) -> BasePromptTemplate:
return self._prompt
@prompt.setter
def prompt(self, prompt: BasePromptTemplate) -> None:
self._prompt = prompt
def __call__(
self,
llm_kwargs: Optional[Dict[str, Any]] = None,
*args: Any,
**kwargs: Any,
) -> Union[Model, List[Model]]:
llm_kwargs = llm_kwargs or {}
description = self._description_eval(**kwargs)
openai_fn_spec = to_openai_tool(self._output_cls, description=description)
messages = self._prompt.format_messages(llm=self._llm, **kwargs)
chat_response = self._llm.chat(
messages=messages,
tools=[openai_fn_spec],
tool_choice=self._tool_choice,
**llm_kwargs,
)
message = chat_response.message
if "tool_calls" not in message.additional_kwargs:
raise ValueError(
"Expected tool_calls in ai_message.additional_kwargs, "
"but none found."
)
tool_calls = message.additional_kwargs["tool_calls"]
return _parse_tool_calls(
tool_calls,
output_cls=self.output_cls,
allow_multiple=self._allow_multiple,
verbose=self._verbose,
)
async def acall(
self,
llm_kwargs: Optional[Dict[str, Any]] = None,
*args: Any,
**kwargs: Any,
) -> Union[Model, List[Model]]:
llm_kwargs = llm_kwargs or {}
description = self._description_eval(**kwargs)
openai_fn_spec = to_openai_tool(self._output_cls, description=description)
messages = self._prompt.format_messages(llm=self._llm, **kwargs)
chat_response = await self._llm.achat(
messages=messages,
tools=[openai_fn_spec],
tool_choice=self._tool_choice,
**llm_kwargs,
)
message = chat_response.message
if "tool_calls" not in message.additional_kwargs:
raise ValueError(
"Expected function call in ai_message.additional_kwargs, "
"but none found."
)
tool_calls = message.additional_kwargs["tool_calls"]
return _parse_tool_calls(
tool_calls,
output_cls=self.output_cls,
allow_multiple=self._allow_multiple,
verbose=self._verbose,
)
def stream_list(
self,
llm_kwargs: Optional[Dict[str, Any]] = None,
*args: Any,
**kwargs: Any,
) -> Generator[Model, None, None]:
"""Streams a list of objects."""
llm_kwargs = llm_kwargs or {}
messages = self._prompt.format_messages(llm=self._llm, **kwargs)
description = self._description_eval(**kwargs)
list_output_cls = create_list_model(self._output_cls)
openai_fn_spec = to_openai_tool(list_output_cls, description=description)
chat_response_gen = self._llm.stream_chat(
messages=messages,
tools=[openai_fn_spec],
tool_choice=_default_tool_choice(list_output_cls),
**llm_kwargs,
)
# extract function call arguments
# obj_start_idx finds start position (before a new "{" in JSON)
obj_start_idx: int = -1 # NOTE: uninitialized
for stream_resp in chat_response_gen:
kwargs = stream_resp.message.additional_kwargs
tool_calls = kwargs["tool_calls"]
if len(tool_calls) == 0:
continue
# NOTE: right now assume only one tool call
# TODO: handle parallel tool calls in streaming setting
fn_args = kwargs["tool_calls"][0].function.arguments
# this is inspired by `get_object` from `MultiTaskBase` in
# the openai_function_call repo
if fn_args.find("[") != -1:
if obj_start_idx == -1:
obj_start_idx = fn_args.find("[") + 1
else:
# keep going until we find the start position
continue
new_obj_json_str, obj_start_idx = _get_json_str(fn_args, obj_start_idx)
if new_obj_json_str is not None:
obj_json_str = new_obj_json_str
obj = self._output_cls.parse_raw(obj_json_str)
if self._verbose:
print(f"Extracted object: {obj.json()}")
yield obj
def _description_eval(self, **kwargs: Any) -> Optional[str]:
description = kwargs.get("description", None)
## __doc__ checks if docstring is provided in the Pydantic Model
if not (self._output_cls.__doc__ or description):
raise ValueError(
"Must provide description for your Pydantic Model. Either provide a docstring or add `description=<your_description>` to the method. Required to convert Pydantic Model to OpenAI Function."
)
## If both docstring and description are provided, raise error
if self._output_cls.__doc__ and description:
raise ValueError(
"Must provide either a docstring or a description, not both."
)
return description
| [
"llama_index.legacy.agent.openai.utils.resolve_tool_choice",
"llama_index.legacy.llms.openai_utils.to_openai_tool",
"llama_index.legacy.llms.openai.OpenAI",
"llama_index.legacy.prompts.base.PromptTemplate",
"llama_index.legacy.program.utils.create_list_model"
] | [((619, 646), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (636, 646), False, 'import logging\n'), ((914, 950), 'llama_index.legacy.agent.openai.utils.resolve_tool_choice', 'resolve_tool_choice', (["schema['title']"], {}), "(schema['title'])\n", (933, 950), False, 'from llama_index.legacy.agent.openai.utils import resolve_tool_choice\n'), ((5395, 5452), 'llama_index.legacy.llms.openai_utils.to_openai_tool', 'to_openai_tool', (['self._output_cls'], {'description': 'description'}), '(self._output_cls, description=description)\n', (5409, 5452), False, 'from llama_index.legacy.llms.openai_utils import OpenAIToolCall, to_openai_tool\n'), ((6503, 6560), 'llama_index.legacy.llms.openai_utils.to_openai_tool', 'to_openai_tool', (['self._output_cls'], {'description': 'description'}), '(self._output_cls, description=description)\n', (6517, 6560), False, 'from llama_index.legacy.llms.openai_utils import OpenAIToolCall, to_openai_tool\n'), ((7740, 7775), 'llama_index.legacy.program.utils.create_list_model', 'create_list_model', (['self._output_cls'], {}), '(self._output_cls)\n', (7757, 7775), False, 'from llama_index.legacy.program.utils import create_list_model\n'), ((7801, 7857), 'llama_index.legacy.llms.openai_utils.to_openai_tool', 'to_openai_tool', (['list_output_cls'], {'description': 'description'}), '(list_output_cls, description=description)\n', (7815, 7857), False, 'from llama_index.legacy.llms.openai_utils import OpenAIToolCall, to_openai_tool\n'), ((3679, 3713), 'llama_index.legacy.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo-0613"""'}), "(model='gpt-3.5-turbo-0613')\n", (3685, 3713), False, 'from llama_index.legacy.llms.openai import OpenAI\n'), ((4460, 4495), 'llama_index.legacy.prompts.base.PromptTemplate', 'PromptTemplate', (['prompt_template_str'], {}), '(prompt_template_str)\n', (4474, 4495), False, 'from llama_index.legacy.prompts.base import BasePromptTemplate, PromptTemplate\n'), ((4679, 4707), 'typing.cast', 'cast', (['PromptTemplate', 'prompt'], {}), '(PromptTemplate, prompt)\n', (4683, 4707), False, 'from typing import Any, Dict, Generator, List, Optional, Tuple, Type, Union, cast\n')] |
# use SQLAlchemy to setup a simple sqlite db
from sqlalchemy import (Column, Integer, MetaData, String, Table, column,
create_engine, select)
engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
# create a toy city_stats table
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
# insert some datapoints
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2731571, "country": "Canada"},
{"city_name": "Tokyo", "population": 13929286, "country": "Japan"},
{"city_name": "Berlin", "population": 600000, "country": "Germany"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.connect() as connection:
cursor = connection.execute(stmt)
from llama_index import SQLDatabase
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
| [
"llama_index.SQLDatabase"
] | [((176, 211), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///:memory:"""'], {}), "('sqlite:///:memory:')\n", (189, 211), False, 'from sqlalchemy import Column, Integer, MetaData, String, Table, column, create_engine, select\n'), ((227, 237), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (235, 237), False, 'from sqlalchemy import Column, Integer, MetaData, String, Table, column, create_engine, select\n'), ((1030, 1080), 'llama_index.SQLDatabase', 'SQLDatabase', (['engine'], {'include_tables': "['city_stats']"}), "(engine, include_tables=['city_stats'])\n", (1041, 1080), False, 'from llama_index import SQLDatabase\n'), ((416, 445), 'sqlalchemy.Column', 'Column', (['"""population"""', 'Integer'], {}), "('population', Integer)\n", (422, 445), False, 'from sqlalchemy import Column, Integer, MetaData, String, Table, column, create_engine, select\n'), ((381, 391), 'sqlalchemy.String', 'String', (['(16)'], {}), '(16)\n', (387, 391), False, 'from sqlalchemy import Column, Integer, MetaData, String, Table, column, create_engine, select\n'), ((469, 479), 'sqlalchemy.String', 'String', (['(16)'], {}), '(16)\n', (475, 479), False, 'from sqlalchemy import Column, Integer, MetaData, String, Table, column, create_engine, select\n'), ((847, 871), 'sqlalchemy.insert', 'insert', (['city_stats_table'], {}), '(city_stats_table)\n', (853, 871), False, 'from sqlalchemy import insert\n')] |
from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
class GPTModel:
def __init__(self, directory_path):
# set maximum input size
self.max_input_size = 4096
# set number of output tokens
self.num_outputs = 2000
# set maximum chunk overlap
self.max_chunk_overlap = 20
# set chunk size limit
self.chunk_size_limit = 600
self.directory_path = directory_path
def construct_index(self):
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name="text-davinci-003", max_tokens=self.num_outputs))
prompt_helper = PromptHelper(self.max_input_size, self.num_outputs, self.max_chunk_overlap, chunk_size_limit=self.chunk_size_limit)
documents = SimpleDirectoryReader(self.directory_path).load_data()
index = GPTSimpleVectorIndex(
documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
)
index.save_to_disk('gptModel.json') | [
"llama_index.PromptHelper",
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader"
] | [((673, 792), 'llama_index.PromptHelper', 'PromptHelper', (['self.max_input_size', 'self.num_outputs', 'self.max_chunk_overlap'], {'chunk_size_limit': 'self.chunk_size_limit'}), '(self.max_input_size, self.num_outputs, self.max_chunk_overlap,\n chunk_size_limit=self.chunk_size_limit)\n', (685, 792), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n'), ((878, 972), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(documents, llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (898, 972), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n'), ((565, 653), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0.5)', 'model_name': '"""text-davinci-003"""', 'max_tokens': 'self.num_outputs'}), "(temperature=0.5, model_name='text-davinci-003', max_tokens=self.\n num_outputs)\n", (571, 653), False, 'from langchain import OpenAI\n'), ((808, 850), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['self.directory_path'], {}), '(self.directory_path)\n', (829, 850), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n')] |
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast
import httpx
from openai import AsyncOpenAI
from openai import OpenAI as SyncOpenAI
from openai.types.chat import ChatCompletionMessageParam
from openai.types.chat.chat_completion_chunk import (
ChatCompletionChunk,
ChoiceDelta,
ChoiceDeltaToolCall,
)
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import (
DEFAULT_CONTEXT_WINDOW,
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
MessageRole,
)
from llama_index.legacy.llms.generic_utils import (
messages_to_prompt as generic_messages_to_prompt,
)
from llama_index.legacy.llms.openai_utils import (
from_openai_message,
resolve_openai_credentials,
to_openai_message_dicts,
)
from llama_index.legacy.multi_modal_llms import (
MultiModalLLM,
MultiModalLLMMetadata,
)
from llama_index.legacy.multi_modal_llms.openai_utils import (
GPT4V_MODELS,
generate_openai_multi_modal_chat_message,
)
from llama_index.legacy.schema import ImageDocument
class OpenAIMultiModal(MultiModalLLM):
model: str = Field(description="The Multi-Modal model to use from OpenAI.")
temperature: float = Field(description="The temperature to use for sampling.")
max_new_tokens: Optional[int] = Field(
description=" The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt",
gt=0,
)
context_window: Optional[int] = Field(
description="The maximum number of context tokens for the model.",
gt=0,
)
image_detail: str = Field(
description="The level of details for image in API calls. Can be low, high, or auto"
)
max_retries: int = Field(
default=3,
description="Maximum number of retries.",
gte=0,
)
timeout: float = Field(
default=60.0,
description="The timeout, in seconds, for API requests.",
gte=0,
)
api_key: str = Field(default=None, description="The OpenAI API key.", exclude=True)
api_base: str = Field(default=None, description="The base URL for OpenAI API.")
api_version: str = Field(description="The API version for OpenAI API.")
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the OpenAI API."
)
default_headers: Dict[str, str] = Field(
default=None, description="The default headers for API requests."
)
_messages_to_prompt: Callable = PrivateAttr()
_completion_to_prompt: Callable = PrivateAttr()
_client: SyncOpenAI = PrivateAttr()
_aclient: AsyncOpenAI = PrivateAttr()
_http_client: Optional[httpx.Client] = PrivateAttr()
def __init__(
self,
model: str = "gpt-4-vision-preview",
temperature: float = DEFAULT_TEMPERATURE,
max_new_tokens: Optional[int] = 300,
additional_kwargs: Optional[Dict[str, Any]] = None,
context_window: Optional[int] = DEFAULT_CONTEXT_WINDOW,
max_retries: int = 3,
timeout: float = 60.0,
image_detail: str = "low",
api_key: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
messages_to_prompt: Optional[Callable] = None,
completion_to_prompt: Optional[Callable] = None,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
**kwargs: Any,
) -> None:
self._messages_to_prompt = messages_to_prompt or generic_messages_to_prompt
self._completion_to_prompt = completion_to_prompt or (lambda x: x)
api_key, api_base, api_version = resolve_openai_credentials(
api_key=api_key,
api_base=api_base,
api_version=api_version,
)
super().__init__(
model=model,
temperature=temperature,
max_new_tokens=max_new_tokens,
additional_kwargs=additional_kwargs or {},
context_window=context_window,
image_detail=image_detail,
max_retries=max_retries,
timeout=timeout,
api_key=api_key,
api_base=api_base,
api_version=api_version,
callback_manager=callback_manager,
default_headers=default_headers,
**kwargs,
)
self._http_client = http_client
self._client, self._aclient = self._get_clients(**kwargs)
def _get_clients(self, **kwargs: Any) -> Tuple[SyncOpenAI, AsyncOpenAI]:
client = SyncOpenAI(**self._get_credential_kwargs())
aclient = AsyncOpenAI(**self._get_credential_kwargs())
return client, aclient
@classmethod
def class_name(cls) -> str:
return "openai_multi_modal_llm"
@property
def metadata(self) -> MultiModalLLMMetadata:
"""Multi Modal LLM metadata."""
return MultiModalLLMMetadata(
num_output=self.max_new_tokens or DEFAULT_NUM_OUTPUTS,
model_name=self.model,
)
def _get_credential_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
"api_key": self.api_key,
"base_url": self.api_base,
"max_retries": self.max_retries,
"default_headers": self.default_headers,
"http_client": self._http_client,
"timeout": self.timeout,
**kwargs,
}
def _get_multi_modal_chat_messages(
self,
prompt: str,
role: str,
image_documents: Sequence[ImageDocument],
**kwargs: Any,
) -> List[ChatCompletionMessageParam]:
return to_openai_message_dicts(
[
generate_openai_multi_modal_chat_message(
prompt=prompt,
role=role,
image_documents=image_documents,
image_detail=self.image_detail,
)
]
)
# Model Params for OpenAI GPT4V model.
def _get_model_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
if self.model not in GPT4V_MODELS:
raise ValueError(
f"Invalid model {self.model}. "
f"Available models are: {list(GPT4V_MODELS.keys())}"
)
base_kwargs = {"model": self.model, "temperature": self.temperature, **kwargs}
if self.max_new_tokens is not None:
# If max_tokens is None, don't include in the payload:
# https://platform.openai.com/docs/api-reference/chat
# https://platform.openai.com/docs/api-reference/completions
base_kwargs["max_tokens"] = self.max_new_tokens
return {**base_kwargs, **self.additional_kwargs}
def _get_response_token_counts(self, raw_response: Any) -> dict:
"""Get the token usage reported by the response."""
if not isinstance(raw_response, dict):
return {}
usage = raw_response.get("usage", {})
# NOTE: other model providers that use the OpenAI client may not report usage
if usage is None:
return {}
return {
"prompt_tokens": usage.get("prompt_tokens", 0),
"completion_tokens": usage.get("completion_tokens", 0),
"total_tokens": usage.get("total_tokens", 0),
}
def _complete(
self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any
) -> CompletionResponse:
all_kwargs = self._get_model_kwargs(**kwargs)
message_dict = self._get_multi_modal_chat_messages(
prompt=prompt, role=MessageRole.USER, image_documents=image_documents
)
response = self._client.chat.completions.create(
messages=message_dict,
stream=False,
**all_kwargs,
)
return CompletionResponse(
text=response.choices[0].message.content,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
all_kwargs = self._get_model_kwargs(**kwargs)
message_dicts = to_openai_message_dicts(messages)
response = self._client.chat.completions.create(
messages=message_dicts,
stream=False,
**all_kwargs,
)
openai_message = response.choices[0].message
message = from_openai_message(openai_message)
return ChatResponse(
message=message,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
def _stream_complete(
self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any
) -> CompletionResponseGen:
all_kwargs = self._get_model_kwargs(**kwargs)
message_dict = self._get_multi_modal_chat_messages(
prompt=prompt, role=MessageRole.USER, image_documents=image_documents
)
def gen() -> CompletionResponseGen:
text = ""
for response in self._client.chat.completions.create(
messages=message_dict,
stream=True,
**all_kwargs,
):
response = cast(ChatCompletionChunk, response)
if len(response.choices) > 0:
delta = response.choices[0].delta
else:
delta = ChoiceDelta()
# update using deltas
content_delta = delta.content or ""
text += content_delta
yield CompletionResponse(
delta=content_delta,
text=text,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
return gen()
def _stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
message_dicts = to_openai_message_dicts(messages)
def gen() -> ChatResponseGen:
content = ""
tool_calls: List[ChoiceDeltaToolCall] = []
is_function = False
for response in self._client.chat.completions.create(
messages=message_dicts,
stream=True,
**self._get_model_kwargs(**kwargs),
):
response = cast(ChatCompletionChunk, response)
if len(response.choices) > 0:
delta = response.choices[0].delta
else:
delta = ChoiceDelta()
# check if this chunk is the start of a function call
if delta.tool_calls:
is_function = True
# update using deltas
role = delta.role or MessageRole.ASSISTANT
content_delta = delta.content or ""
content += content_delta
additional_kwargs = {}
if is_function:
tool_calls = self._update_tool_calls(tool_calls, delta.tool_calls)
additional_kwargs["tool_calls"] = tool_calls
yield ChatResponse(
message=ChatMessage(
role=role,
content=content,
additional_kwargs=additional_kwargs,
),
delta=content_delta,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
return gen()
def complete(
self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any
) -> CompletionResponse:
return self._complete(prompt, image_documents, **kwargs)
def stream_complete(
self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any
) -> CompletionResponseGen:
return self._stream_complete(prompt, image_documents, **kwargs)
def chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
return self._chat(messages, **kwargs)
def stream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseGen:
return self._stream_chat(messages, **kwargs)
# ===== Async Endpoints =====
async def _acomplete(
self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any
) -> CompletionResponse:
all_kwargs = self._get_model_kwargs(**kwargs)
message_dict = self._get_multi_modal_chat_messages(
prompt=prompt, role=MessageRole.USER, image_documents=image_documents
)
response = await self._aclient.chat.completions.create(
messages=message_dict,
stream=False,
**all_kwargs,
)
return CompletionResponse(
text=response.choices[0].message.content,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
async def acomplete(
self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any
) -> CompletionResponse:
return await self._acomplete(prompt, image_documents, **kwargs)
async def _astream_complete(
self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any
) -> CompletionResponseAsyncGen:
all_kwargs = self._get_model_kwargs(**kwargs)
message_dict = self._get_multi_modal_chat_messages(
prompt=prompt, role=MessageRole.USER, image_documents=image_documents
)
async def gen() -> CompletionResponseAsyncGen:
text = ""
async for response in await self._aclient.chat.completions.create(
messages=message_dict,
stream=True,
**all_kwargs,
):
response = cast(ChatCompletionChunk, response)
if len(response.choices) > 0:
delta = response.choices[0].delta
else:
delta = ChoiceDelta()
# update using deltas
content_delta = delta.content or ""
text += content_delta
yield CompletionResponse(
delta=content_delta,
text=text,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
return gen()
async def _achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
all_kwargs = self._get_model_kwargs(**kwargs)
message_dicts = to_openai_message_dicts(messages)
response = await self._aclient.chat.completions.create(
messages=message_dicts,
stream=False,
**all_kwargs,
)
openai_message = response.choices[0].message
message = from_openai_message(openai_message)
return ChatResponse(
message=message,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
async def _astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
message_dicts = to_openai_message_dicts(messages)
async def gen() -> ChatResponseAsyncGen:
content = ""
tool_calls: List[ChoiceDeltaToolCall] = []
is_function = False
async for response in await self._aclient.chat.completions.create(
messages=message_dicts,
stream=True,
**self._get_model_kwargs(**kwargs),
):
response = cast(ChatCompletionChunk, response)
if len(response.choices) > 0:
delta = response.choices[0].delta
else:
delta = ChoiceDelta()
# check if this chunk is the start of a function call
if delta.tool_calls:
is_function = True
# update using deltas
role = delta.role or MessageRole.ASSISTANT
content_delta = delta.content or ""
content += content_delta
additional_kwargs = {}
if is_function:
tool_calls = self._update_tool_calls(tool_calls, delta.tool_calls)
additional_kwargs["tool_calls"] = tool_calls
yield ChatResponse(
message=ChatMessage(
role=role,
content=content,
additional_kwargs=additional_kwargs,
),
delta=content_delta,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
return gen()
async def astream_complete(
self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any
) -> CompletionResponseAsyncGen:
return await self._astream_complete(prompt, image_documents, **kwargs)
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
return await self._achat(messages, **kwargs)
async def astream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseAsyncGen:
return await self._astream_chat(messages, **kwargs)
| [
"llama_index.legacy.llms.openai_utils.from_openai_message",
"llama_index.legacy.multi_modal_llms.MultiModalLLMMetadata",
"llama_index.legacy.multi_modal_llms.openai_utils.generate_openai_multi_modal_chat_message",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.core.llms.types.ChatMessage",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.llms.openai_utils.to_openai_message_dicts",
"llama_index.legacy.multi_modal_llms.openai_utils.GPT4V_MODELS.keys",
"llama_index.legacy.llms.openai_utils.resolve_openai_credentials"
] | [((1407, 1469), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Multi-Modal model to use from OpenAI."""'}), "(description='The Multi-Modal model to use from OpenAI.')\n", (1412, 1469), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1495, 1552), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (1500, 1552), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1589, 1713), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '""" The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt"""', 'gt': '(0)'}), "(description=\n ' The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt'\n , gt=0)\n", (1594, 1713), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1763, 1841), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of context tokens for the model."""', 'gt': '(0)'}), "(description='The maximum number of context tokens for the model.', gt=0)\n", (1768, 1841), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1889, 1985), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The level of details for image in API calls. Can be low, high, or auto"""'}), "(description=\n 'The level of details for image in API calls. Can be low, high, or auto')\n", (1894, 1985), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2018, 2083), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(3)', 'description': '"""Maximum number of retries."""', 'gte': '(0)'}), "(default=3, description='Maximum number of retries.', gte=0)\n", (2023, 2083), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2136, 2225), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(60.0)', 'description': '"""The timeout, in seconds, for API requests."""', 'gte': '(0)'}), "(default=60.0, description=\n 'The timeout, in seconds, for API requests.', gte=0)\n", (2141, 2225), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2271, 2339), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""The OpenAI API key."""', 'exclude': '(True)'}), "(default=None, description='The OpenAI API key.', exclude=True)\n", (2276, 2339), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2360, 2423), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""The base URL for OpenAI API."""'}), "(default=None, description='The base URL for OpenAI API.')\n", (2365, 2423), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2447, 2499), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The API version for OpenAI API."""'}), "(description='The API version for OpenAI API.')\n", (2452, 2499), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2540, 2625), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the OpenAI API."""'}), "(default_factory=dict, description='Additional kwargs for the OpenAI API.'\n )\n", (2545, 2625), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2673, 2745), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""The default headers for API requests."""'}), "(default=None, description='The default headers for API requests.')\n", (2678, 2745), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2797, 2810), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2808, 2810), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2849, 2862), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2860, 2862), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2889, 2902), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2900, 2902), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2931, 2944), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2942, 2944), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2988, 3001), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2999, 3001), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((4037, 4129), 'llama_index.legacy.llms.openai_utils.resolve_openai_credentials', 'resolve_openai_credentials', ([], {'api_key': 'api_key', 'api_base': 'api_base', 'api_version': 'api_version'}), '(api_key=api_key, api_base=api_base, api_version=\n api_version)\n', (4063, 4129), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((5276, 5379), 'llama_index.legacy.multi_modal_llms.MultiModalLLMMetadata', 'MultiModalLLMMetadata', ([], {'num_output': '(self.max_new_tokens or DEFAULT_NUM_OUTPUTS)', 'model_name': 'self.model'}), '(num_output=self.max_new_tokens or DEFAULT_NUM_OUTPUTS,\n model_name=self.model)\n', (5297, 5379), False, 'from llama_index.legacy.multi_modal_llms import MultiModalLLM, MultiModalLLMMetadata\n'), ((8542, 8575), 'llama_index.legacy.llms.openai_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (8565, 8575), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((8802, 8837), 'llama_index.legacy.llms.openai_utils.from_openai_message', 'from_openai_message', (['openai_message'], {}), '(openai_message)\n', (8821, 8837), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((10361, 10394), 'llama_index.legacy.llms.openai_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (10384, 10394), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((15114, 15147), 'llama_index.legacy.llms.openai_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (15137, 15147), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((15381, 15416), 'llama_index.legacy.llms.openai_utils.from_openai_message', 'from_openai_message', (['openai_message'], {}), '(openai_message)\n', (15400, 15416), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((15731, 15764), 'llama_index.legacy.llms.openai_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (15754, 15764), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((6070, 6205), 'llama_index.legacy.multi_modal_llms.openai_utils.generate_openai_multi_modal_chat_message', 'generate_openai_multi_modal_chat_message', ([], {'prompt': 'prompt', 'role': 'role', 'image_documents': 'image_documents', 'image_detail': 'self.image_detail'}), '(prompt=prompt, role=role,\n image_documents=image_documents, image_detail=self.image_detail)\n', (6110, 6205), False, 'from llama_index.legacy.multi_modal_llms.openai_utils import GPT4V_MODELS, generate_openai_multi_modal_chat_message\n'), ((9628, 9663), 'typing.cast', 'cast', (['ChatCompletionChunk', 'response'], {}), '(ChatCompletionChunk, response)\n', (9632, 9663), False, 'from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast\n'), ((10776, 10811), 'typing.cast', 'cast', (['ChatCompletionChunk', 'response'], {}), '(ChatCompletionChunk, response)\n', (10780, 10811), False, 'from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast\n'), ((14330, 14365), 'typing.cast', 'cast', (['ChatCompletionChunk', 'response'], {}), '(ChatCompletionChunk, response)\n', (14334, 14365), False, 'from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast\n'), ((16170, 16205), 'typing.cast', 'cast', (['ChatCompletionChunk', 'response'], {}), '(ChatCompletionChunk, response)\n', (16174, 16205), False, 'from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast\n'), ((9814, 9827), 'openai.types.chat.chat_completion_chunk.ChoiceDelta', 'ChoiceDelta', ([], {}), '()\n', (9825, 9827), False, 'from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta, ChoiceDeltaToolCall\n'), ((10962, 10975), 'openai.types.chat.chat_completion_chunk.ChoiceDelta', 'ChoiceDelta', ([], {}), '()\n', (10973, 10975), False, 'from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta, ChoiceDeltaToolCall\n'), ((14516, 14529), 'openai.types.chat.chat_completion_chunk.ChoiceDelta', 'ChoiceDelta', ([], {}), '()\n', (14527, 14529), False, 'from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta, ChoiceDeltaToolCall\n'), ((16356, 16369), 'openai.types.chat.chat_completion_chunk.ChoiceDelta', 'ChoiceDelta', ([], {}), '()\n', (16367, 16369), False, 'from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta, ChoiceDeltaToolCall\n'), ((6602, 6621), 'llama_index.legacy.multi_modal_llms.openai_utils.GPT4V_MODELS.keys', 'GPT4V_MODELS.keys', ([], {}), '()\n', (6619, 6621), False, 'from llama_index.legacy.multi_modal_llms.openai_utils import GPT4V_MODELS, generate_openai_multi_modal_chat_message\n'), ((11603, 11679), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), '(role=role, content=content, additional_kwargs=additional_kwargs)\n', (11614, 11679), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, MessageRole\n'), ((16997, 17073), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), '(role=role, content=content, additional_kwargs=additional_kwargs)\n', (17008, 17073), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, MessageRole\n')] |
import os
from typing import Any
from llama_index import ServiceContext, VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding, OpenAIEmbeddingMode
from llama_index.prompts import PromptTemplate
from llama_index.indices.query.schema import QueryBundle
from llama_index.llms import OpenAI
from llama_index.postprocessor.types import BaseNodePostprocessor
from llama_index.schema import NodeWithScore
from src.common.utils import Settings
from src.datastore import CreateDataStore
class DocumentGroupingPostprocessor(BaseNodePostprocessor):
def _postprocess_nodes(
self, nodes: list[NodeWithScore], query_bundle: QueryBundle | None = None
) -> list[NodeWithScore]:
nodes_by_document: dict[str, Any] = {}
for node in nodes:
document_id = node.metadata["id"]
if document_id not in nodes_by_document:
nodes_by_document[document_id] = []
nodes_by_document[document_id].append(node)
out_nodes = []
for group in nodes_by_document.values():
content = "\n--------------------\n".join([n.get_content() for n in group])
score = max(n.score for n in group)
group[0].node.text = content
group[0].score = score
out_nodes.append(group[0])
return out_nodes
class LlamaIndexModel:
def __init__(
self,
top_k: int,
vector_store_query_mode: str,
alpha: float,
prompt: str,
response_mode: str,
load_model: bool = True,
):
self.model = OpenAI(model="gpt-3.5-turbo") if load_model else None
self.top_k = top_k
self.vector_store_query_mode = vector_store_query_mode
self.alpha = alpha
self.prompt = prompt
self.response_mode = response_mode
self.index = self.build_index()
def run(self, query: str):
self.query = query
self.response = self.build_response()
self.processed_response = self.process_response(self.response)
def build_index(self):
self.service_context = ServiceContext.from_defaults(
embed_model=OpenAIEmbedding(
mode=OpenAIEmbeddingMode.TEXT_SEARCH_MODE,
model="text-embedding-3-large",
api_key=os.environ["OPENAI_API_KEY"],
),
llm=self.model,
)
docstore = CreateDataStore(**Settings().datastore.model_dump())
docstore.setup_ingestion_pipeline()
return VectorStoreIndex.from_vector_store(
docstore.vector_store,
service_context=self.service_context,
show_progress=True,
use_async=True,
)
def build_response(self):
retriever = self.index.as_retriever(
vector_store_query_mode=self.vector_store_query_mode,
alpha=self.alpha,
similarity_top_k=self.top_k,
)
response = retriever.retrieve(self.query)
postprocessor = DocumentGroupingPostprocessor()
response = postprocessor.postprocess_nodes(response)
return response
@staticmethod
def process_response(response):
scores = [r.score for r in response]
out = [r.node.metadata for r in response]
for item in out:
item["score"] = scores.pop(0)
return out
def explain_dataset(self, response_num: int):
if not self.response:
raise ValueError("No response to explain")
text_qa_template = PromptTemplate(self.prompt)
response = self.response[response_num]
index = VectorStoreIndex(
nodes=[response.node], service_context=self.service_context
)
query_engine = index.as_query_engine(text_qa_template=text_qa_template)
response = query_engine.query(self.query)
self.explained_response = response.response
if __name__ == "__main__":
model = LlamaIndexModel(**Settings().model.model_dump())
model.run("diabetes")
model.processed_response
model.explain_dataset(2)
model.explained_response
| [
"llama_index.prompts.PromptTemplate",
"llama_index.VectorStoreIndex",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.llms.OpenAI",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((2518, 2654), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['docstore.vector_store'], {'service_context': 'self.service_context', 'show_progress': '(True)', 'use_async': '(True)'}), '(docstore.vector_store, service_context=\n self.service_context, show_progress=True, use_async=True)\n', (2552, 2654), False, 'from llama_index import ServiceContext, VectorStoreIndex\n'), ((3524, 3551), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['self.prompt'], {}), '(self.prompt)\n', (3538, 3551), False, 'from llama_index.prompts import PromptTemplate\n'), ((3615, 3692), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {'nodes': '[response.node]', 'service_context': 'self.service_context'}), '(nodes=[response.node], service_context=self.service_context)\n', (3631, 3692), False, 'from llama_index import ServiceContext, VectorStoreIndex\n'), ((1582, 1611), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (1588, 1611), False, 'from llama_index.llms import OpenAI\n'), ((2156, 2289), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'mode': 'OpenAIEmbeddingMode.TEXT_SEARCH_MODE', 'model': '"""text-embedding-3-large"""', 'api_key': "os.environ['OPENAI_API_KEY']"}), "(mode=OpenAIEmbeddingMode.TEXT_SEARCH_MODE, model=\n 'text-embedding-3-large', api_key=os.environ['OPENAI_API_KEY'])\n", (2171, 2289), False, 'from llama_index.embeddings.openai import OpenAIEmbedding, OpenAIEmbeddingMode\n'), ((3956, 3966), 'src.common.utils.Settings', 'Settings', ([], {}), '()\n', (3964, 3966), False, 'from src.common.utils import Settings\n'), ((2424, 2434), 'src.common.utils.Settings', 'Settings', ([], {}), '()\n', (2432, 2434), False, 'from src.common.utils import Settings\n')] |
"""SQL Structured Store."""
from collections import defaultdict
from enum import Enum
from typing import Any, Optional, Sequence, Union
from sqlalchemy import Table
from llama_index.legacy.core.base_query_engine import BaseQueryEngine
from llama_index.legacy.core.base_retriever import BaseRetriever
from llama_index.legacy.data_structs.table import SQLStructTable
from llama_index.legacy.indices.common.struct_store.schema import SQLContextContainer
from llama_index.legacy.indices.common.struct_store.sql import (
SQLStructDatapointExtractor,
)
from llama_index.legacy.indices.struct_store.base import BaseStructStoreIndex
from llama_index.legacy.indices.struct_store.container_builder import (
SQLContextContainerBuilder,
)
from llama_index.legacy.schema import BaseNode
from llama_index.legacy.service_context import ServiceContext
from llama_index.legacy.utilities.sql_wrapper import SQLDatabase
class SQLQueryMode(str, Enum):
SQL = "sql"
NL = "nl"
class SQLStructStoreIndex(BaseStructStoreIndex[SQLStructTable]):
"""SQL Struct Store Index.
The SQLStructStoreIndex is an index that uses a SQL database
under the hood. During index construction, the data can be inferred
from unstructured documents given a schema extract prompt,
or it can be pre-loaded in the database.
During query time, the user can either specify a raw SQL query
or a natural language query to retrieve their data.
NOTE: this is deprecated.
Args:
documents (Optional[Sequence[DOCUMENTS_INPUT]]): Documents to index.
NOTE: in the SQL index, this is an optional field.
sql_database (Optional[SQLDatabase]): SQL database to use,
including table names to specify.
See :ref:`Ref-Struct-Store` for more details.
table_name (Optional[str]): Name of the table to use
for extracting data.
Either table_name or table must be specified.
table (Optional[Table]): SQLAlchemy Table object to use.
Specifying the Table object explicitly, instead of
the table name, allows you to pass in a view.
Either table_name or table must be specified.
sql_context_container (Optional[SQLContextContainer]): SQL context container.
an be generated from a SQLContextContainerBuilder.
See :ref:`Ref-Struct-Store` for more details.
"""
index_struct_cls = SQLStructTable
def __init__(
self,
nodes: Optional[Sequence[BaseNode]] = None,
index_struct: Optional[SQLStructTable] = None,
service_context: Optional[ServiceContext] = None,
sql_database: Optional[SQLDatabase] = None,
table_name: Optional[str] = None,
table: Optional[Table] = None,
ref_doc_id_column: Optional[str] = None,
sql_context_container: Optional[SQLContextContainer] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
if sql_database is None:
raise ValueError("sql_database must be specified")
self.sql_database = sql_database
# needed here for data extractor
self._ref_doc_id_column = ref_doc_id_column
self._table_name = table_name
self._table = table
# if documents aren't specified, pass in a blank []
if index_struct is None:
nodes = nodes or []
super().__init__(
nodes=nodes,
index_struct=index_struct,
service_context=service_context,
**kwargs,
)
# TODO: index_struct context_dict is deprecated,
# we're migrating storage of information to here.
if sql_context_container is None:
container_builder = SQLContextContainerBuilder(sql_database)
sql_context_container = container_builder.build_context_container()
self.sql_context_container = sql_context_container
@property
def ref_doc_id_column(self) -> Optional[str]:
return self._ref_doc_id_column
def _build_index_from_nodes(self, nodes: Sequence[BaseNode]) -> SQLStructTable:
"""Build index from nodes."""
index_struct = self.index_struct_cls()
if len(nodes) == 0:
return index_struct
else:
data_extractor = SQLStructDatapointExtractor(
self._service_context.llm,
self.schema_extract_prompt,
self.output_parser,
self.sql_database,
table_name=self._table_name,
table=self._table,
ref_doc_id_column=self._ref_doc_id_column,
)
# group nodes by ids
source_to_node = defaultdict(list)
for node in nodes:
source_to_node[node.ref_doc_id].append(node)
for node_set in source_to_node.values():
data_extractor.insert_datapoint_from_nodes(node_set)
return index_struct
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Insert a document."""
data_extractor = SQLStructDatapointExtractor(
self._service_context.llm,
self.schema_extract_prompt,
self.output_parser,
self.sql_database,
table_name=self._table_name,
table=self._table,
ref_doc_id_column=self._ref_doc_id_column,
)
data_extractor.insert_datapoint_from_nodes(nodes)
def as_retriever(self, **kwargs: Any) -> BaseRetriever:
raise NotImplementedError("Not supported")
def as_query_engine(
self, query_mode: Union[str, SQLQueryMode] = SQLQueryMode.NL, **kwargs: Any
) -> BaseQueryEngine:
# NOTE: lazy import
from llama_index.legacy.indices.struct_store.sql_query import (
NLStructStoreQueryEngine,
SQLStructStoreQueryEngine,
)
if query_mode == SQLQueryMode.NL:
return NLStructStoreQueryEngine(self, **kwargs)
elif query_mode == SQLQueryMode.SQL:
return SQLStructStoreQueryEngine(self, **kwargs)
else:
raise ValueError(f"Unknown query mode: {query_mode}")
GPTSQLStructStoreIndex = SQLStructStoreIndex
| [
"llama_index.legacy.indices.struct_store.container_builder.SQLContextContainerBuilder",
"llama_index.legacy.indices.common.struct_store.sql.SQLStructDatapointExtractor",
"llama_index.legacy.indices.struct_store.sql_query.NLStructStoreQueryEngine",
"llama_index.legacy.indices.struct_store.sql_query.SQLStructStoreQueryEngine"
] | [((5106, 5332), 'llama_index.legacy.indices.common.struct_store.sql.SQLStructDatapointExtractor', 'SQLStructDatapointExtractor', (['self._service_context.llm', 'self.schema_extract_prompt', 'self.output_parser', 'self.sql_database'], {'table_name': 'self._table_name', 'table': 'self._table', 'ref_doc_id_column': 'self._ref_doc_id_column'}), '(self._service_context.llm, self.\n schema_extract_prompt, self.output_parser, self.sql_database,\n table_name=self._table_name, table=self._table, ref_doc_id_column=self.\n _ref_doc_id_column)\n', (5133, 5332), False, 'from llama_index.legacy.indices.common.struct_store.sql import SQLStructDatapointExtractor\n'), ((3747, 3787), 'llama_index.legacy.indices.struct_store.container_builder.SQLContextContainerBuilder', 'SQLContextContainerBuilder', (['sql_database'], {}), '(sql_database)\n', (3773, 3787), False, 'from llama_index.legacy.indices.struct_store.container_builder import SQLContextContainerBuilder\n'), ((4304, 4530), 'llama_index.legacy.indices.common.struct_store.sql.SQLStructDatapointExtractor', 'SQLStructDatapointExtractor', (['self._service_context.llm', 'self.schema_extract_prompt', 'self.output_parser', 'self.sql_database'], {'table_name': 'self._table_name', 'table': 'self._table', 'ref_doc_id_column': 'self._ref_doc_id_column'}), '(self._service_context.llm, self.\n schema_extract_prompt, self.output_parser, self.sql_database,\n table_name=self._table_name, table=self._table, ref_doc_id_column=self.\n _ref_doc_id_column)\n', (4331, 4530), False, 'from llama_index.legacy.indices.common.struct_store.sql import SQLStructDatapointExtractor\n'), ((4706, 4723), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4717, 4723), False, 'from collections import defaultdict\n'), ((5969, 6009), 'llama_index.legacy.indices.struct_store.sql_query.NLStructStoreQueryEngine', 'NLStructStoreQueryEngine', (['self'], {}), '(self, **kwargs)\n', (5993, 6009), False, 'from llama_index.legacy.indices.struct_store.sql_query import NLStructStoreQueryEngine, SQLStructStoreQueryEngine\n'), ((6074, 6115), 'llama_index.legacy.indices.struct_store.sql_query.SQLStructStoreQueryEngine', 'SQLStructStoreQueryEngine', (['self'], {}), '(self, **kwargs)\n', (6099, 6115), False, 'from llama_index.legacy.indices.struct_store.sql_query import NLStructStoreQueryEngine, SQLStructStoreQueryEngine\n')] |
"""Base vector store index query."""
from pathlib import Path
from typing import List, Optional
from llama_index import QueryBundle, StorageContext, load_index_from_storage
from llama_index.data_structs import NodeWithScore, IndexDict
from llama_index.indices.utils import log_vector_store_query_result
from llama_index.indices.vector_store import VectorIndexRetriever
from llama_index.token_counter.token_counter import llm_token_counter
from llama_index.vector_stores import FaissVectorStore
from llama_index.vector_stores.types import VectorStoreQuery
class FaissVectorIndexRetriever(VectorIndexRetriever):
"""Vector index retriever.
Args:
index (GPTVectorStoreIndex): vector store index.
similarity_top_k (int): number of top k results to return.
vector_store_query_mode (str): vector store query mode
See reference for VectorStoreQueryMode for full list of supported modes.
filters (Optional[MetadataFilters]): metadata filters, defaults to None
alpha (float): weight for sparse/dense retrieval, only used for
hybrid query mode.
doc_ids (Optional[List[str]]): list of documents to constrain search.
vector_store_kwargs (dict): Additional vector store specific kwargs to pass
through to the vector store at query time.
"""
@llm_token_counter("retrieve")
def _retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
if self._vector_store.is_embedding_query:
if query_bundle.embedding is None:
query_bundle.embedding = (
self._service_context.embed_model.get_agg_embedding_from_queries(
query_bundle.embedding_strs
)
)
query = VectorStoreQuery(
query_embedding=query_bundle.embedding,
similarity_top_k=self._similarity_top_k,
doc_ids=self._doc_ids,
query_str=query_bundle.query_str,
mode=self._vector_store_query_mode,
alpha=self._alpha,
filters=self._filters,
)
query_result = self._vector_store.query(query, **self._kwargs)
# NOTE: vector store does not keep text and returns node indices.
# Need to recover all nodes from docstore
if query_result.ids is None:
raise ValueError(
"Vector store query result should return at "
"least one of nodes or ids."
)
assert isinstance(self._index.index_struct, IndexDict)
node_ids = [
self._doc_ids[int(idx)] for idx in query_result.ids
]
nodes = self._docstore.get_nodes(node_ids)
query_result.nodes = nodes
log_vector_store_query_result(query_result)
node_with_scores: List[NodeWithScore] = []
for ind, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[ind]
node_with_scores.append(NodeWithScore(node, score=score))
return node_with_scores
def get_retriever(root_dir):
datatypes = ['sherlock', 'coco', 'narratives']
retrievers = {}
for datatype in datatypes:
if datatype == 'sherlock':
datapath = f'{root_dir}/sherlock_dataset/sherlock_train_v1_1.json'
elif datatype == 'narratives':
datapath = f'{root_dir}/openimages_localized_narratives/open_images_train_v6_captions.jsonl'
elif datatype == 'coco':
datapath = f'{root_dir}/coco/dataset_coco.json'
else:
raise NotImplementedError
try:
persist_dir = str(Path(datapath).parent / f'{datatype}_index')
vector_store = FaissVectorStore.from_persist_dir(persist_dir=persist_dir)
storage_context = StorageContext.from_defaults(vector_store=vector_store, persist_dir=persist_dir)
index = load_index_from_storage(storage_context=storage_context)
retriever = FaissVectorIndexRetriever(index,
doc_ids=list(index.index_struct.nodes_dict.values()),
similarity_top_k=10)
retrievers[datatype] = retriever
except Exception as e:
print(f'Failed to load {datatype} retriever, {e}')
return retrievers
| [
"llama_index.vector_stores.FaissVectorStore.from_persist_dir",
"llama_index.token_counter.token_counter.llm_token_counter",
"llama_index.StorageContext.from_defaults",
"llama_index.indices.utils.log_vector_store_query_result",
"llama_index.vector_stores.types.VectorStoreQuery",
"llama_index.data_structs.NodeWithScore",
"llama_index.load_index_from_storage"
] | [((1342, 1371), 'llama_index.token_counter.token_counter.llm_token_counter', 'llm_token_counter', (['"""retrieve"""'], {}), "('retrieve')\n", (1359, 1371), False, 'from llama_index.token_counter.token_counter import llm_token_counter\n'), ((1813, 2059), 'llama_index.vector_stores.types.VectorStoreQuery', 'VectorStoreQuery', ([], {'query_embedding': 'query_bundle.embedding', 'similarity_top_k': 'self._similarity_top_k', 'doc_ids': 'self._doc_ids', 'query_str': 'query_bundle.query_str', 'mode': 'self._vector_store_query_mode', 'alpha': 'self._alpha', 'filters': 'self._filters'}), '(query_embedding=query_bundle.embedding, similarity_top_k=\n self._similarity_top_k, doc_ids=self._doc_ids, query_str=query_bundle.\n query_str, mode=self._vector_store_query_mode, alpha=self._alpha,\n filters=self._filters)\n', (1829, 2059), False, 'from llama_index.vector_stores.types import VectorStoreQuery\n'), ((2778, 2821), 'llama_index.indices.utils.log_vector_store_query_result', 'log_vector_store_query_result', (['query_result'], {}), '(query_result)\n', (2807, 2821), False, 'from llama_index.indices.utils import log_vector_store_query_result\n'), ((3837, 3895), 'llama_index.vector_stores.FaissVectorStore.from_persist_dir', 'FaissVectorStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (3870, 3895), False, 'from llama_index.vector_stores import FaissVectorStore\n'), ((3926, 4011), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store', 'persist_dir': 'persist_dir'}), '(vector_store=vector_store, persist_dir=persist_dir\n )\n', (3954, 4011), False, 'from llama_index import QueryBundle, StorageContext, load_index_from_storage\n'), ((4027, 4083), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context'}), '(storage_context=storage_context)\n', (4050, 4083), False, 'from llama_index import QueryBundle, StorageContext, load_index_from_storage\n'), ((3117, 3149), 'llama_index.data_structs.NodeWithScore', 'NodeWithScore', (['node'], {'score': 'score'}), '(node, score=score)\n', (3130, 3149), False, 'from llama_index.data_structs import NodeWithScore, IndexDict\n'), ((3764, 3778), 'pathlib.Path', 'Path', (['datapath'], {}), '(datapath)\n', (3768, 3778), False, 'from pathlib import Path\n')] |
import logging
import sys
import os.path
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage,
)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# check if storage already exists
PERSIST_DIR = "./storage"
if not os.path.exists(PERSIST_DIR):
# load the documents and create the index
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
# store it for later
index.storage_context.persist(persist_dir=PERSIST_DIR)
else:
# load the existing index
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
index = load_index_from_storage(storage_context)
# Either way we can now query the index
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print("got response: ")
print(response)
| [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader"
] | [((173, 232), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (192, 232), False, 'import logging\n'), ((264, 304), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (285, 304), False, 'import logging\n'), ((520, 562), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (551, 562), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((705, 758), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'PERSIST_DIR'}), '(persist_dir=PERSIST_DIR)\n', (733, 758), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((771, 811), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (794, 811), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((233, 252), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (250, 252), False, 'import logging\n'), ((466, 495), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (487, 495), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n')] |
import os
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader
from IPython.display import Markdown, display
from llama_index import StorageContext, load_index_from_storage
# Set the OPENAI_API_KEY environment variable using the value from st.secrets['OPENAI_API_KEY']
os.environ['OPENAI_API_KEY'] = st.secrets['OPENAI_API_KEY']
# Load documents from the 'data' directory
documents = SimpleDirectoryReader('data').load_data()
# Create an index from the loaded documents
index = GPTVectorStoreIndex.from_documents(documents)
# Save the index to disk
index.storage_context.persist(persist_dir="./storage")
# Load the index from disk for testing
# loaded_index = load_index_from_storage(StorageContext.from_defaults(persist_dir="./storage"))
# Create a query engine from the loaded index
# query_engine = loaded_index.as_query_engine()
# Perform a query using the query engine
# response = query_engine.query("What is Citizens Round?")
# print(response)
| [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader"
] | [((495, 540), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (529, 540), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader\n'), ((400, 429), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (421, 429), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader\n')] |
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.embeddings import resolve_embed_model
# Don't Import "from openai import OpenAI". It will panic
from llama_index.llms import OpenAI
# load data
documents = SimpleDirectoryReader("data").load_data()
# bge-m3 embedding model
embed_model = resolve_embed_model("local:BAAI/bge-small-en-v1.5")
# set LM Studio
llm = OpenAI(api_base="http://localhost:1234/v1", api_key="not-needed")
# Index the data
service_context = ServiceContext.from_defaults(
embed_model=embed_model, llm=llm,
)
index = VectorStoreIndex.from_documents(
documents, service_context=service_context
)
# query
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response) | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.embeddings.resolve_embed_model",
"llama_index.SimpleDirectoryReader",
"llama_index.llms.OpenAI"
] | [((337, 388), 'llama_index.embeddings.resolve_embed_model', 'resolve_embed_model', (['"""local:BAAI/bge-small-en-v1.5"""'], {}), "('local:BAAI/bge-small-en-v1.5')\n", (356, 388), False, 'from llama_index.embeddings import resolve_embed_model\n'), ((412, 477), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'api_base': '"""http://localhost:1234/v1"""', 'api_key': '"""not-needed"""'}), "(api_base='http://localhost:1234/v1', api_key='not-needed')\n", (418, 477), False, 'from llama_index.llms import OpenAI\n'), ((514, 576), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model', 'llm': 'llm'}), '(embed_model=embed_model, llm=llm)\n', (542, 576), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((592, 667), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (623, 667), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((255, 284), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (276, 284), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n')] |
import os
import openai
from dotenv import load_dotenv
from llama_index.embeddings import AzureOpenAIEmbedding, OpenAIEmbedding
from llama_index.llms import AzureOpenAI, OpenAI, OpenAILike
from llama_index.llms.llama_utils import messages_to_prompt
def load_models(args, logger):
llm_service = args.llm_service
llm_model = args.llm_model
load_dotenv()
llm_temperature = 0.1
timeout = 120.0
if llm_model == "gpt3":
# _llm_model = "gpt-35-turbo"
_llm_model = "gpt-3.5-turbo-1106"
_azure_openai_key = os.getenv("AZURE_OPENAI_GPT4_KEY")
_azure_ada_deployment_name = "sketch-ai-gpt4-ada002"
_azure_endpoint = "https://open-ai-uk-south.openai.azure.com/"
_azure_deployment_name = "sketch-ai-gpt35turbo"
elif llm_model == "gpt4":
_azure_deployment_name = "sketch-ai-gpt4"
_llm_model = "gpt-4-1106-preview"
# _llm_model_oai = "gpt-4-1106-preview"
_azure_openai_key = os.getenv("AZURE_OPENAI_GPT4_KEY")
_azure_ada_deployment_name = "sketch-ai-gpt4-ada002"
_azure_endpoint = "https://open-ai-uk-south.openai.azure.com/"
elif llm_model == "local":
# TODO: Replace these once I figure out how to get local embedding server working
_azure_deployment_name = "sketch-ai-gpt4"
_azure_openai_key = os.getenv("AZURE_OPENAI_GPT4_KEY")
_azure_ada_deployment_name = "sketch-ai-gpt4-ada002"
_azure_endpoint = "https://open-ai-uk-south.openai.azure.com/"
api_version = "2023-07-01-preview"
else:
raise ValueError(f"Model {llm_model} not supported")
_llm = None
_embed_model = None
if llm_service == "openai":
logger.info("Using OPENAI services")
_embed_model = OpenAIEmbedding()
openai.api_key = os.getenv("OPENAI_API_KEY")
_llm = OpenAI(temperature=llm_temperature, model=_llm_model, timeout=timeout)
elif llm_service == "azure":
logger.info("Using AZURE services")
api_version = "2023-07-01-preview"
_llm = AzureOpenAI(
model=_llm_model,
deployment_name=_azure_deployment_name,
api_key=_azure_openai_key,
azure_endpoint=_azure_endpoint,
api_version=api_version,
temperature=llm_temperature,
timeout=timeout,
)
# You need to deploy your own embedding model as well as your own chat completion model
_embed_model = AzureOpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name=_azure_ada_deployment_name,
api_key=_azure_openai_key,
azure_endpoint=_azure_endpoint,
api_version=api_version,
)
elif llm_service == "local":
MAC_M1_LUNADEMO_CONSERVATIVE_TIMEOUT = 10 * 60 # sec
_llm = OpenAILike(
max_tokens=4096,
temperature=0.9,
api_key="localai_fake",
api_version="localai_fake",
api_base=f"http://{args.local_llm_address}:{args.local_llm_port}/v1",
model="local llm",
is_chat_model=True,
timeout=MAC_M1_LUNADEMO_CONSERVATIVE_TIMEOUT,
messages_to_prompt=messages_to_prompt,
)
# TODO(qu): _embed_model = HuggingFaceEmbedding(model_name="WhereIsAI/UAE-Large-V1")
_embed_model = OpenAIEmbedding()
else:
raise ValueError(f"Service {llm_service} not supported")
logger.info(f"Loading embedded model {_embed_model.model_name} \n")
logger.info(f"Loading llm model {_llm.model} \n")
return _llm, _embed_model
| [
"llama_index.llms.AzureOpenAI",
"llama_index.embeddings.AzureOpenAIEmbedding",
"llama_index.llms.OpenAILike",
"llama_index.llms.OpenAI",
"llama_index.embeddings.OpenAIEmbedding"
] | [((353, 366), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (364, 366), False, 'from dotenv import load_dotenv\n'), ((550, 584), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_GPT4_KEY"""'], {}), "('AZURE_OPENAI_GPT4_KEY')\n", (559, 584), False, 'import os\n'), ((1760, 1777), 'llama_index.embeddings.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (1775, 1777), False, 'from llama_index.embeddings import AzureOpenAIEmbedding, OpenAIEmbedding\n'), ((1804, 1831), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1813, 1831), False, 'import os\n'), ((1847, 1917), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': 'llm_temperature', 'model': '_llm_model', 'timeout': 'timeout'}), '(temperature=llm_temperature, model=_llm_model, timeout=timeout)\n', (1853, 1917), False, 'from llama_index.llms import AzureOpenAI, OpenAI, OpenAILike\n'), ((971, 1005), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_GPT4_KEY"""'], {}), "('AZURE_OPENAI_GPT4_KEY')\n", (980, 1005), False, 'import os\n'), ((2055, 2263), 'llama_index.llms.AzureOpenAI', 'AzureOpenAI', ([], {'model': '_llm_model', 'deployment_name': '_azure_deployment_name', 'api_key': '_azure_openai_key', 'azure_endpoint': '_azure_endpoint', 'api_version': 'api_version', 'temperature': 'llm_temperature', 'timeout': 'timeout'}), '(model=_llm_model, deployment_name=_azure_deployment_name,\n api_key=_azure_openai_key, azure_endpoint=_azure_endpoint, api_version=\n api_version, temperature=llm_temperature, timeout=timeout)\n', (2066, 2263), False, 'from llama_index.llms import AzureOpenAI, OpenAI, OpenAILike\n'), ((2470, 2660), 'llama_index.embeddings.AzureOpenAIEmbedding', 'AzureOpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'deployment_name': '_azure_ada_deployment_name', 'api_key': '_azure_openai_key', 'azure_endpoint': '_azure_endpoint', 'api_version': 'api_version'}), "(model='text-embedding-ada-002', deployment_name=\n _azure_ada_deployment_name, api_key=_azure_openai_key, azure_endpoint=\n _azure_endpoint, api_version=api_version)\n", (2490, 2660), False, 'from llama_index.embeddings import AzureOpenAIEmbedding, OpenAIEmbedding\n'), ((1337, 1371), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_GPT4_KEY"""'], {}), "('AZURE_OPENAI_GPT4_KEY')\n", (1346, 1371), False, 'import os\n'), ((2832, 3146), 'llama_index.llms.OpenAILike', 'OpenAILike', ([], {'max_tokens': '(4096)', 'temperature': '(0.9)', 'api_key': '"""localai_fake"""', 'api_version': '"""localai_fake"""', 'api_base': 'f"""http://{args.local_llm_address}:{args.local_llm_port}/v1"""', 'model': '"""local llm"""', 'is_chat_model': '(True)', 'timeout': 'MAC_M1_LUNADEMO_CONSERVATIVE_TIMEOUT', 'messages_to_prompt': 'messages_to_prompt'}), "(max_tokens=4096, temperature=0.9, api_key='localai_fake',\n api_version='localai_fake', api_base=\n f'http://{args.local_llm_address}:{args.local_llm_port}/v1', model=\n 'local llm', is_chat_model=True, timeout=\n MAC_M1_LUNADEMO_CONSERVATIVE_TIMEOUT, messages_to_prompt=messages_to_prompt\n )\n", (2842, 3146), False, 'from llama_index.llms import AzureOpenAI, OpenAI, OpenAILike\n'), ((3358, 3375), 'llama_index.embeddings.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (3373, 3375), False, 'from llama_index.embeddings import AzureOpenAIEmbedding, OpenAIEmbedding\n')] |
from llama_index.core.tools import FunctionTool
import os
note_file = os.path.join("data", "notes.txt")
def save_note(note):
if not os.path.exists(note_file):
open(note_file, "w")
with open(note_file, "a") as f:
f.writelines([note + "\n"])
return "note saved"
note_engine = FunctionTool.from_defaults(
fn=save_note,
name="note_saver",
description="this tool can save a text based note to a file for the user",
) | [
"llama_index.core.tools.FunctionTool.from_defaults"
] | [((71, 104), 'os.path.join', 'os.path.join', (['"""data"""', '"""notes.txt"""'], {}), "('data', 'notes.txt')\n", (83, 104), False, 'import os\n'), ((309, 448), 'llama_index.core.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'save_note', 'name': '"""note_saver"""', 'description': '"""this tool can save a text based note to a file for the user"""'}), "(fn=save_note, name='note_saver', description=\n 'this tool can save a text based note to a file for the user')\n", (335, 448), False, 'from llama_index.core.tools import FunctionTool\n'), ((139, 164), 'os.path.exists', 'os.path.exists', (['note_file'], {}), '(note_file)\n', (153, 164), False, 'import os\n')] |
from llama_index import VectorStoreIndex, download_loader, StorageContext
from llama_index.vector_stores import PineconeVectorStore
"""Simple reader that reads wikipedia."""
from typing import Any, List
from llama_index.readers.base import BaseReader
from llama_index.schema import Document
from dotenv import load_dotenv
import os
import openai
import pinecone
load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]
class JaWikipediaReader(BaseReader):
"""Wikipedia reader.
Reads a page.
"""
def __init__(self) -> None:
"""Initialize with parameters."""
try:
import wikipedia # noqa: F401
except ImportError:
raise ImportError(
"`wikipedia` package not found, please run `pip install wikipedia`"
)
def load_data(self, pages: List[str], **load_kwargs: Any) -> List[Document]:
"""Load data from the input directory.
Args:
pages (List[str]): List of pages to read.
"""
import wikipedia
wikipedia.set_lang("ja")
results = []
for page in pages:
page_content = wikipedia.page(page, **load_kwargs).content
results.append(Document(text=page_content))
return results
WikipediaReader = download_loader("WikipediaReader")
loader = JaWikipediaReader()
documents = loader.load_data(pages=['ONE_PIECE', 'ONE_PIECEの登場人物一覧', 'ONE_PIECEの用語一覧', 'ONE_PIECEの地理'])
# init pinecone
pinecone.init(api_key=os.environ["OPENAI_API_KEY"], environment="asia-southeast1-gcp-free")
# pinecone.create_index("manga-reader", dimension=1536, metric="cosine", pod_type="p1")
# construct vector store and customize storage context
storage_context = StorageContext.from_defaults(
vector_store = PineconeVectorStore(pinecone.Index("manga-reader"))
)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.schema.Document",
"llama_index.download_loader"
] | [((366, 379), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (377, 379), False, 'from dotenv import load_dotenv\n'), ((1291, 1325), 'llama_index.download_loader', 'download_loader', (['"""WikipediaReader"""'], {}), "('WikipediaReader')\n", (1306, 1325), False, 'from llama_index import VectorStoreIndex, download_loader, StorageContext\n'), ((1476, 1572), 'pinecone.init', 'pinecone.init', ([], {'api_key': "os.environ['OPENAI_API_KEY']", 'environment': '"""asia-southeast1-gcp-free"""'}), "(api_key=os.environ['OPENAI_API_KEY'], environment=\n 'asia-southeast1-gcp-free')\n", (1489, 1572), False, 'import pinecone\n'), ((1841, 1916), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context'}), '(documents, storage_context=storage_context)\n', (1872, 1916), False, 'from llama_index import VectorStoreIndex, download_loader, StorageContext\n'), ((1049, 1073), 'wikipedia.set_lang', 'wikipedia.set_lang', (['"""ja"""'], {}), "('ja')\n", (1067, 1073), False, 'import wikipedia\n'), ((1799, 1829), 'pinecone.Index', 'pinecone.Index', (['"""manga-reader"""'], {}), "('manga-reader')\n", (1813, 1829), False, 'import pinecone\n'), ((1149, 1184), 'wikipedia.page', 'wikipedia.page', (['page'], {}), '(page, **load_kwargs)\n', (1163, 1184), False, 'import wikipedia\n'), ((1220, 1247), 'llama_index.schema.Document', 'Document', ([], {'text': 'page_content'}), '(text=page_content)\n', (1228, 1247), False, 'from llama_index.schema import Document\n')] |
# Load indices from disk
from llama_index.core import load_index_from_storage
from llama_index.core import StorageContext
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.llms.openai import OpenAI
from llama_index.core.query_engine import SubQuestionQueryEngine
from llama_index.agent.openai import OpenAIAgent
import json
import os
import openai
script_dir = os.path.dirname(os.path.realpath(__file__))
config_path = os.path.join(script_dir, "config.json")
with open(config_path) as f:
config = json.load(f)
storage_dir = os.path.join(script_dir, config['storage-dir'])
os.environ["OPENAI_API_KEY"] = config['OPENAI_API_KEY']
openai.api_key = os.environ["OPENAI_API_KEY"]
# Load the cached data and create a query engine for each year which can be
# used by a chat model.
index_set = {}
individual_query_engine_tools = []
for year in config['years']:
storage_context = StorageContext.from_defaults(
persist_dir=os.path.join(storage_dir, f"{year}")
)
cur_index = load_index_from_storage(
storage_context,
)
index_set[year] = cur_index
tool = QueryEngineTool(
query_engine=index_set[year].as_query_engine(),
metadata=ToolMetadata(
name=f"vector_index_{year}",
description=f"useful for when you want to answer queries about the {year} SEC 10-K for Uber",
),
)
individual_query_engine_tools.append(tool)
# Create a tool that can query filings across multiple years
query_engine = SubQuestionQueryEngine.from_defaults(
query_engine_tools=individual_query_engine_tools,
llm=OpenAI(model="gpt-3.5-turbo"),
)
query_engine_tool = QueryEngineTool(
query_engine=query_engine,
metadata=ToolMetadata(
name="sub_question_query_engine",
description="useful for when you want to answer queries that require analyzing multiple SEC 10-K documents for Uber",
),
)
# Pass all of the tools to the chat model agent
tools = individual_query_engine_tools + [query_engine_tool]
agent = OpenAIAgent.from_tools(tools)
| [
"llama_index.agent.openai.OpenAIAgent.from_tools",
"llama_index.core.tools.ToolMetadata",
"llama_index.core.load_index_from_storage",
"llama_index.llms.openai.OpenAI"
] | [((452, 491), 'os.path.join', 'os.path.join', (['script_dir', '"""config.json"""'], {}), "(script_dir, 'config.json')\n", (464, 491), False, 'import os\n'), ((562, 609), 'os.path.join', 'os.path.join', (['script_dir', "config['storage-dir']"], {}), "(script_dir, config['storage-dir'])\n", (574, 609), False, 'import os\n'), ((2039, 2068), 'llama_index.agent.openai.OpenAIAgent.from_tools', 'OpenAIAgent.from_tools', (['tools'], {}), '(tools)\n', (2061, 2068), False, 'from llama_index.agent.openai import OpenAIAgent\n'), ((410, 436), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (426, 436), False, 'import os\n'), ((534, 546), 'json.load', 'json.load', (['f'], {}), '(f)\n', (543, 546), False, 'import json\n'), ((1025, 1065), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1048, 1065), False, 'from llama_index.core import load_index_from_storage\n'), ((1616, 1645), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (1622, 1645), False, 'from llama_index.llms.openai import OpenAI\n'), ((1731, 1905), 'llama_index.core.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""sub_question_query_engine"""', 'description': '"""useful for when you want to answer queries that require analyzing multiple SEC 10-K documents for Uber"""'}), "(name='sub_question_query_engine', description=\n 'useful for when you want to answer queries that require analyzing multiple SEC 10-K documents for Uber'\n )\n", (1743, 1905), False, 'from llama_index.core.tools import QueryEngineTool, ToolMetadata\n'), ((966, 1002), 'os.path.join', 'os.path.join', (['storage_dir', 'f"""{year}"""'], {}), "(storage_dir, f'{year}')\n", (978, 1002), False, 'import os\n'), ((1214, 1359), 'llama_index.core.tools.ToolMetadata', 'ToolMetadata', ([], {'name': 'f"""vector_index_{year}"""', 'description': 'f"""useful for when you want to answer queries about the {year} SEC 10-K for Uber"""'}), "(name=f'vector_index_{year}', description=\n f'useful for when you want to answer queries about the {year} SEC 10-K for Uber'\n )\n", (1226, 1359), False, 'from llama_index.core.tools import QueryEngineTool, ToolMetadata\n')] |
import logging
logging.basicConfig(level=logging.CRITICAL)
import os
from pathlib import Path
import openai
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from llama_index import (
GPTVectorStoreIndex,
LLMPredictor,
ServiceContext,
StorageContext,
download_loader,
load_index_from_storage,
)
from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file
load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]
history = []
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.618, model_name=models["gpt-3"], max_tokens=256))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size_limit=1024)
def make_index(file):
cls()
print("👀 Loading...")
PDFReader = download_loader("PDFReader")
loader = PDFReader()
documents = loader.load_data(file=Path(FILES) / file)
if os.path.exists(Path(CACHE) / file):
print("📚 Index found in cache")
return
else:
print("📚 Index not found in cache, creating it...")
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
index.storage_context.persist(persist_dir=Path(CACHE) / file)
def chat(file_name, index):
while True:
prompt = input("\n😎 Prompt: ")
if prompt == "exit":
handle_exit()
elif prompt == "save":
handle_save(str(file_name), history)
query_engine = index.as_query_engine(response_mode="compact")
response = query_engine.query(prompt)
print("\n👻 Response: " + str(response))
history.append({"user": prompt, "response": str(response)})
def ask(file_name):
try:
print("👀 Loading...")
storage_context = StorageContext.from_defaults(persist_dir=Path(CACHE) / file_name)
index = load_index_from_storage(storage_context, service_context=service_context)
cls()
print("✅ Ready! Let's start the conversation")
print("ℹ️ Press Ctrl+C to exit")
chat(file_name, index)
except KeyboardInterrupt:
handle_exit()
if __name__ == "__main__":
initialize()
file = select_file()
if file:
file_name = Path(file).name
make_index(file_name)
ask(file_name)
else:
print("No files found")
handle_exit()
| [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.download_loader",
"llama_index.load_index_from_storage"
] | [((16, 59), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.CRITICAL'}), '(level=logging.CRITICAL)\n', (35, 59), False, 'import logging\n'), ((444, 457), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (455, 457), False, 'from dotenv import load_dotenv\n'), ((644, 729), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size_limit': '(1024)'}), '(llm_predictor=llm_predictor, chunk_size_limit=1024\n )\n', (672, 729), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, download_loader, load_index_from_storage\n'), ((753, 758), 'utils.cls', 'cls', ([], {}), '()\n', (756, 758), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((802, 830), 'llama_index.download_loader', 'download_loader', (['"""PDFReader"""'], {}), "('PDFReader')\n", (817, 830), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, download_loader, load_index_from_storage\n'), ((2171, 2183), 'utils.initialize', 'initialize', ([], {}), '()\n', (2181, 2183), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((2195, 2208), 'utils.select_file', 'select_file', ([], {}), '()\n', (2206, 2208), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((551, 624), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.618)', 'model_name': "models['gpt-3']", 'max_tokens': '(256)'}), "(temperature=0.618, model_name=models['gpt-3'], max_tokens=256)\n", (561, 624), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1099, 1177), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1133, 1177), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, download_loader, load_index_from_storage\n'), ((1871, 1944), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (1894, 1944), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, download_loader, load_index_from_storage\n'), ((1953, 1958), 'utils.cls', 'cls', ([], {}), '()\n', (1956, 1958), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((2361, 2374), 'utils.handle_exit', 'handle_exit', ([], {}), '()\n', (2372, 2374), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((937, 948), 'pathlib.Path', 'Path', (['CACHE'], {}), '(CACHE)\n', (941, 948), False, 'from pathlib import Path\n'), ((1374, 1387), 'utils.handle_exit', 'handle_exit', ([], {}), '()\n', (1385, 1387), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((2124, 2137), 'utils.handle_exit', 'handle_exit', ([], {}), '()\n', (2135, 2137), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((2242, 2252), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (2246, 2252), False, 'from pathlib import Path\n'), ((894, 905), 'pathlib.Path', 'Path', (['FILES'], {}), '(FILES)\n', (898, 905), False, 'from pathlib import Path\n'), ((1228, 1239), 'pathlib.Path', 'Path', (['CACHE'], {}), '(CACHE)\n', (1232, 1239), False, 'from pathlib import Path\n'), ((1830, 1841), 'pathlib.Path', 'Path', (['CACHE'], {}), '(CACHE)\n', (1834, 1841), False, 'from pathlib import Path\n')] |
# uses brave (requires api key) for web search then uses ollama for local embedding and inference, for a cost-free web RAG
# requires ollama to be installed and running
import os
import json
import logging
import sys
import requests
from dotenv import load_dotenv
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.core import VectorStoreIndex, Document
from llama_index.tools.brave_search import BraveSearchToolSpec
from llama_index.readers.web import SimpleWebPageReader
# Local Model Setup
from llama_index.core import Settings
Settings.embed_model = OllamaEmbedding(model_name="nomic-embed-text")
# Make sure to run: ollama pull nomic-embed-text
from llama_index.llms.ollama import Ollama
Settings.llm = Ollama(model="mistral", request_timeout=360.0)
# Make sure to run: ollama pull mistral
# Constants
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'
HEADERS = {'User-Agent': USER_AGENT}
RETRIES = Retry(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])
def setup_logging():
"""
Initialize logging configuration to output logs to stdout.
"""
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
def load_environment_variables():
"""
Load environment variables from the .env file.
:return: The Brave API key.
"""
load_dotenv()
return os.getenv('BRAVE_API_KEY')
def perform_search(query, api_key):
"""
Perform a search using the Brave Search API.
:param query: The search query.
:param api_key: The Brave API key.
:return: The search response.
"""
tool_spec = BraveSearchToolSpec(api_key=api_key)
return tool_spec.brave_search(query=query)
def extract_search_results(response):
"""
Extract search results from the Brave Search API response.
:param response: The search response.
:return: A list of search results.
"""
documents = [doc.text for doc in response]
search_results = []
for document in documents:
response_data = json.loads(document)
search_results.extend(response_data.get('web', {}).get('results', []))
return search_results
def scrape_web_pages(search_results):
"""
Scrape web pages from the URLs obtained from the search results.
:param search_results: The list of search results.
:return: A list of scraped documents.
"""
session = requests.Session()
session.mount('http://', HTTPAdapter(max_retries=RETRIES))
session.mount('https://', HTTPAdapter(max_retries=RETRIES))
all_documents = []
for result in search_results:
url = result.get('url')
try:
response = session.get(url, headers=HEADERS, timeout=10)
response.raise_for_status()
doc = Document(text=response.text, url=url)
all_documents.append(doc)
except requests.exceptions.RequestException as e:
logging.error(f"Failed to scrape {url}: {e}")
return all_documents
def main():
"""
Main function to orchestrate the search, scraping, and querying process.
"""
setup_logging()
api_key = load_environment_variables()
my_query = "What is RAG, retrieval augmented generation?"
response = perform_search(my_query, api_key)
search_results = extract_search_results(response)
all_documents = scrape_web_pages(search_results)
# Load all the scraped documents into the vector store
index = VectorStoreIndex.from_documents(all_documents)
# Use the index to query with the language model
query_engine = index.as_query_engine()
response = query_engine.query(my_query)
print(response)
if __name__ == "__main__":
main()
| [
"llama_index.llms.ollama.Ollama",
"llama_index.tools.brave_search.BraveSearchToolSpec",
"llama_index.embeddings.ollama.OllamaEmbedding",
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.Document"
] | [((660, 706), 'llama_index.embeddings.ollama.OllamaEmbedding', 'OllamaEmbedding', ([], {'model_name': '"""nomic-embed-text"""'}), "(model_name='nomic-embed-text')\n", (675, 706), False, 'from llama_index.embeddings.ollama import OllamaEmbedding\n'), ((814, 860), 'llama_index.llms.ollama.Ollama', 'Ollama', ([], {'model': '"""mistral"""', 'request_timeout': '(360.0)'}), "(model='mistral', request_timeout=360.0)\n", (820, 860), False, 'from llama_index.llms.ollama import Ollama\n'), ((1082, 1155), 'urllib3.util.retry.Retry', 'Retry', ([], {'total': '(5)', 'backoff_factor': '(0.1)', 'status_forcelist': '[500, 502, 503, 504]'}), '(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])\n', (1087, 1155), False, 'from urllib3.util.retry import Retry\n'), ((1261, 1320), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (1280, 1320), False, 'import logging\n'), ((1536, 1549), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1547, 1549), False, 'from dotenv import load_dotenv\n'), ((1561, 1587), 'os.getenv', 'os.getenv', (['"""BRAVE_API_KEY"""'], {}), "('BRAVE_API_KEY')\n", (1570, 1587), False, 'import os\n'), ((1815, 1851), 'llama_index.tools.brave_search.BraveSearchToolSpec', 'BraveSearchToolSpec', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (1834, 1851), False, 'from llama_index.tools.brave_search import BraveSearchToolSpec\n'), ((2585, 2603), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2601, 2603), False, 'import requests\n'), ((3642, 3688), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['all_documents'], {}), '(all_documents)\n', (3673, 3688), False, 'from llama_index.core import VectorStoreIndex, Document\n'), ((1356, 1396), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (1377, 1396), False, 'import logging\n'), ((2224, 2244), 'json.loads', 'json.loads', (['document'], {}), '(document)\n', (2234, 2244), False, 'import json\n'), ((2633, 2665), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'RETRIES'}), '(max_retries=RETRIES)\n', (2644, 2665), False, 'from requests.adapters import HTTPAdapter\n'), ((2697, 2729), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'RETRIES'}), '(max_retries=RETRIES)\n', (2708, 2729), False, 'from requests.adapters import HTTPAdapter\n'), ((1325, 1344), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1342, 1344), False, 'import logging\n'), ((2961, 2998), 'llama_index.core.Document', 'Document', ([], {'text': 'response.text', 'url': 'url'}), '(text=response.text, url=url)\n', (2969, 2998), False, 'from llama_index.core import VectorStoreIndex, Document\n'), ((3107, 3152), 'logging.error', 'logging.error', (['f"""Failed to scrape {url}: {e}"""'], {}), "(f'Failed to scrape {url}: {e}')\n", (3120, 3152), False, 'import logging\n')] |
import tkinter as tk
from tkinter import filedialog
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader
import os
os.environ['OPENAI_API_KEY'] = 'sk-'# Your API key
class MyApp(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.master.configure(bg='#f0f0f0')
self.pack(fill='both', expand=True)
self.create_widgets()
def create_widgets(self):
self.title_label = tk.Label(self, text="Document Chatbot", font=('Arial', 16, 'bold'), bg='#f0f0f0')
self.title_label.pack(pady=10)
self.select_dir_button = tk.Button(self, text="Choose Directory", command=self.select_directory, bg='#0c7cd5', fg='white', activebackground='#0a5ca1', activeforeground='white', borderwidth=0, padx=10, pady=5)
self.select_dir_button.pack(pady=(10,0))
self.selected_dir_label = tk.Label(self, text="", font=('Arial', 12), bg='#f0f0f0')
self.selected_dir_label.pack(pady=(0,10))
self.query_label = tk.Label(self, text="Query:", font=('Arial', 12), bg='#f0f0f0')
self.query_label.pack()
self.query_entry = tk.Entry(self, font=('Arial', 12), bd=2)
self.query_entry.pack(pady=(0,10), ipady=5, ipadx=10)
self.search_button = tk.Button(self, text="Search Documents", command=self.search, bg='#0c7cd5', fg='white', activebackground='#0a5ca1', activeforeground='white', borderwidth=0, padx=10, pady=5)
self.search_button.pack(pady=(0,10))
self.results_text = tk.Text(self, height=10, font=('Arial', 12), bg='#f5f5f5', fg='#333333', bd=2, padx=10, pady=10)
self.results_text.tag_configure('highlight', background='#bbeeff')
self.results_text.pack(fill='both', expand=True, padx=10)
def select_directory(self):
self.directory = filedialog.askdirectory()
self.selected_dir_label.configure(text=f"Selected directory: {self.directory}")
def search(self):
try:
documents = SimpleDirectoryReader(self.directory).load_data()
except AttributeError:
self.results_text.delete('1.0', tk.END)
self.results_text.insert(tk.END, "Please select a directory first.")
return
index = GPTSimpleVectorIndex(documents)
index.save_to_disk('index.json')
index = GPTSimpleVectorIndex.load_from_disk('index.json')
query = self.query_entry.get()
response = index.query(query)
self.results_text.delete('1.0', tk.END)
self.results_text.insert(tk.END, response)
if len(response) > 0:
start = '1.0'
while True:
start = self.results_text.search(query, start, stopindex=tk.END)
if not start:
break
end = f"{start}+{len(query)}c"
self.results_text.tag_add('highlight', start, end)
start = end
root = tk.Tk()
root.title("Document Chatbot")
root.geometry("500x500")
app = MyApp(root)
app.mainloop()
| [
"llama_index.GPTSimpleVectorIndex",
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.SimpleDirectoryReader"
] | [((3123, 3130), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (3128, 3130), True, 'import tkinter as tk\n'), ((505, 591), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Document Chatbot"""', 'font': "('Arial', 16, 'bold')", 'bg': '"""#f0f0f0"""'}), "(self, text='Document Chatbot', font=('Arial', 16, 'bold'), bg=\n '#f0f0f0')\n", (513, 591), True, 'import tkinter as tk\n'), ((671, 864), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Choose Directory"""', 'command': 'self.select_directory', 'bg': '"""#0c7cd5"""', 'fg': '"""white"""', 'activebackground': '"""#0a5ca1"""', 'activeforeground': '"""white"""', 'borderwidth': '(0)', 'padx': '(10)', 'pady': '(5)'}), "(self, text='Choose Directory', command=self.select_directory, bg=\n '#0c7cd5', fg='white', activebackground='#0a5ca1', activeforeground=\n 'white', borderwidth=0, padx=10, pady=5)\n", (680, 864), True, 'import tkinter as tk\n'), ((950, 1007), 'tkinter.Label', 'tk.Label', (['self'], {'text': '""""""', 'font': "('Arial', 12)", 'bg': '"""#f0f0f0"""'}), "(self, text='', font=('Arial', 12), bg='#f0f0f0')\n", (958, 1007), True, 'import tkinter as tk\n'), ((1097, 1160), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Query:"""', 'font': "('Arial', 12)", 'bg': '"""#f0f0f0"""'}), "(self, text='Query:', font=('Arial', 12), bg='#f0f0f0')\n", (1105, 1160), True, 'import tkinter as tk\n'), ((1232, 1272), 'tkinter.Entry', 'tk.Entry', (['self'], {'font': "('Arial', 12)", 'bd': '(2)'}), "(self, font=('Arial', 12), bd=2)\n", (1240, 1272), True, 'import tkinter as tk\n'), ((1376, 1557), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Search Documents"""', 'command': 'self.search', 'bg': '"""#0c7cd5"""', 'fg': '"""white"""', 'activebackground': '"""#0a5ca1"""', 'activeforeground': '"""white"""', 'borderwidth': '(0)', 'padx': '(10)', 'pady': '(5)'}), "(self, text='Search Documents', command=self.search, bg='#0c7cd5',\n fg='white', activebackground='#0a5ca1', activeforeground='white',\n borderwidth=0, padx=10, pady=5)\n", (1385, 1557), True, 'import tkinter as tk\n'), ((1635, 1736), 'tkinter.Text', 'tk.Text', (['self'], {'height': '(10)', 'font': "('Arial', 12)", 'bg': '"""#f5f5f5"""', 'fg': '"""#333333"""', 'bd': '(2)', 'padx': '(10)', 'pady': '(10)'}), "(self, height=10, font=('Arial', 12), bg='#f5f5f5', fg='#333333', bd\n =2, padx=10, pady=10)\n", (1642, 1736), True, 'import tkinter as tk\n'), ((1944, 1969), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {}), '()\n', (1967, 1969), False, 'from tkinter import filedialog\n'), ((2395, 2426), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {}), '(documents)\n', (2415, 2426), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader\n'), ((2495, 2544), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['"""index.json"""'], {}), "('index.json')\n", (2530, 2544), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader\n'), ((2131, 2168), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['self.directory'], {}), '(self.directory)\n', (2152, 2168), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader\n')] |
"""Composability graphs."""
from typing import Any, Dict, List, Optional, Sequence, Type, cast
from llama_index.legacy.core.base_query_engine import BaseQueryEngine
from llama_index.legacy.data_structs.data_structs import IndexStruct
from llama_index.legacy.indices.base import BaseIndex
from llama_index.legacy.schema import (
IndexNode,
NodeRelationship,
ObjectType,
RelatedNodeInfo,
)
from llama_index.legacy.service_context import ServiceContext
from llama_index.legacy.storage.storage_context import StorageContext
class ComposableGraph:
"""Composable graph."""
def __init__(
self,
all_indices: Dict[str, BaseIndex],
root_id: str,
storage_context: Optional[StorageContext] = None,
) -> None:
"""Init params."""
self._all_indices = all_indices
self._root_id = root_id
self.storage_context = storage_context
@property
def root_id(self) -> str:
return self._root_id
@property
def all_indices(self) -> Dict[str, BaseIndex]:
return self._all_indices
@property
def root_index(self) -> BaseIndex:
return self._all_indices[self._root_id]
@property
def index_struct(self) -> IndexStruct:
return self._all_indices[self._root_id].index_struct
@property
def service_context(self) -> ServiceContext:
return self._all_indices[self._root_id].service_context
@classmethod
def from_indices(
cls,
root_index_cls: Type[BaseIndex],
children_indices: Sequence[BaseIndex],
index_summaries: Optional[Sequence[str]] = None,
service_context: Optional[ServiceContext] = None,
storage_context: Optional[StorageContext] = None,
**kwargs: Any,
) -> "ComposableGraph": # type: ignore
"""Create composable graph using this index class as the root."""
service_context = service_context or ServiceContext.from_defaults()
with service_context.callback_manager.as_trace("graph_construction"):
if index_summaries is None:
for index in children_indices:
if index.index_struct.summary is None:
raise ValueError(
"Summary must be set for children indices. "
"If the index does a summary "
"(through index.index_struct.summary), then "
"it must be specified with then `index_summaries` "
"argument in this function. We will support "
"automatically setting the summary in the future."
)
index_summaries = [
index.index_struct.summary for index in children_indices
]
else:
# set summaries for each index
for index, summary in zip(children_indices, index_summaries):
index.index_struct.summary = summary
if len(children_indices) != len(index_summaries):
raise ValueError("indices and index_summaries must have same length!")
# construct index nodes
index_nodes = []
for index, summary in zip(children_indices, index_summaries):
assert isinstance(index.index_struct, IndexStruct)
index_node = IndexNode(
text=summary,
index_id=index.index_id,
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id=index.index_id, node_type=ObjectType.INDEX
)
},
)
index_nodes.append(index_node)
# construct root index
root_index = root_index_cls(
nodes=index_nodes,
service_context=service_context,
storage_context=storage_context,
**kwargs,
)
# type: ignore
all_indices: List[BaseIndex] = [
*cast(List[BaseIndex], children_indices),
root_index,
]
return cls(
all_indices={index.index_id: index for index in all_indices},
root_id=root_index.index_id,
storage_context=storage_context,
)
def get_index(self, index_struct_id: Optional[str] = None) -> BaseIndex:
"""Get index from index struct id."""
if index_struct_id is None:
index_struct_id = self._root_id
return self._all_indices[index_struct_id]
def as_query_engine(self, **kwargs: Any) -> BaseQueryEngine:
# NOTE: lazy import
from llama_index.legacy.query_engine.graph_query_engine import (
ComposableGraphQueryEngine,
)
return ComposableGraphQueryEngine(self, **kwargs)
| [
"llama_index.legacy.service_context.ServiceContext.from_defaults",
"llama_index.legacy.query_engine.graph_query_engine.ComposableGraphQueryEngine",
"llama_index.legacy.schema.RelatedNodeInfo"
] | [((4914, 4956), 'llama_index.legacy.query_engine.graph_query_engine.ComposableGraphQueryEngine', 'ComposableGraphQueryEngine', (['self'], {}), '(self, **kwargs)\n', (4940, 4956), False, 'from llama_index.legacy.query_engine.graph_query_engine import ComposableGraphQueryEngine\n'), ((1930, 1960), 'llama_index.legacy.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {}), '()\n', (1958, 1960), False, 'from llama_index.legacy.service_context import ServiceContext\n'), ((4133, 4172), 'typing.cast', 'cast', (['List[BaseIndex]', 'children_indices'], {}), '(List[BaseIndex], children_indices)\n', (4137, 4172), False, 'from typing import Any, Dict, List, Optional, Sequence, Type, cast\n'), ((3584, 3651), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'index.index_id', 'node_type': 'ObjectType.INDEX'}), '(node_id=index.index_id, node_type=ObjectType.INDEX)\n', (3599, 3651), False, 'from llama_index.legacy.schema import IndexNode, NodeRelationship, ObjectType, RelatedNodeInfo\n')] |
from langchain.callbacks import CallbackManager
from llama_index import ServiceContext, PromptHelper, LLMPredictor
from core.callback_handler.std_out_callback_handler import DifyStdOutCallbackHandler
from core.embedding.openai_embedding import OpenAIEmbedding
from core.llm.llm_builder import LLMBuilder
class IndexBuilder:
@classmethod
def get_default_service_context(cls, tenant_id: str) -> ServiceContext:
# set number of output tokens
num_output = 512
# only for verbose
callback_manager = CallbackManager([DifyStdOutCallbackHandler()])
llm = LLMBuilder.to_llm(
tenant_id=tenant_id,
model_name='text-davinci-003',
temperature=0,
max_tokens=num_output,
callback_manager=callback_manager,
)
llm_predictor = LLMPredictor(llm=llm)
# These parameters here will affect the logic of segmenting the final synthesized response.
# The number of refinement iterations in the synthesis process depends
# on whether the length of the segmented output exceeds the max_input_size.
prompt_helper = PromptHelper(
max_input_size=3500,
num_output=num_output,
max_chunk_overlap=20
)
provider = LLMBuilder.get_default_provider(tenant_id)
model_credentials = LLMBuilder.get_model_credentials(
tenant_id=tenant_id,
model_provider=provider,
model_name='text-embedding-ada-002'
)
return ServiceContext.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=OpenAIEmbedding(**model_credentials),
)
@classmethod
def get_fake_llm_service_context(cls, tenant_id: str) -> ServiceContext:
llm = LLMBuilder.to_llm(
tenant_id=tenant_id,
model_name='fake'
)
return ServiceContext.from_defaults(
llm_predictor=LLMPredictor(llm=llm),
embed_model=OpenAIEmbedding()
)
| [
"llama_index.PromptHelper",
"llama_index.LLMPredictor"
] | [((599, 745), 'core.llm.llm_builder.LLMBuilder.to_llm', 'LLMBuilder.to_llm', ([], {'tenant_id': 'tenant_id', 'model_name': '"""text-davinci-003"""', 'temperature': '(0)', 'max_tokens': 'num_output', 'callback_manager': 'callback_manager'}), "(tenant_id=tenant_id, model_name='text-davinci-003',\n temperature=0, max_tokens=num_output, callback_manager=callback_manager)\n", (616, 745), False, 'from core.llm.llm_builder import LLMBuilder\n'), ((838, 859), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (850, 859), False, 'from llama_index import ServiceContext, PromptHelper, LLMPredictor\n'), ((1148, 1226), 'llama_index.PromptHelper', 'PromptHelper', ([], {'max_input_size': '(3500)', 'num_output': 'num_output', 'max_chunk_overlap': '(20)'}), '(max_input_size=3500, num_output=num_output, max_chunk_overlap=20)\n', (1160, 1226), False, 'from llama_index import ServiceContext, PromptHelper, LLMPredictor\n'), ((1293, 1335), 'core.llm.llm_builder.LLMBuilder.get_default_provider', 'LLMBuilder.get_default_provider', (['tenant_id'], {}), '(tenant_id)\n', (1324, 1335), False, 'from core.llm.llm_builder import LLMBuilder\n'), ((1365, 1485), 'core.llm.llm_builder.LLMBuilder.get_model_credentials', 'LLMBuilder.get_model_credentials', ([], {'tenant_id': 'tenant_id', 'model_provider': 'provider', 'model_name': '"""text-embedding-ada-002"""'}), "(tenant_id=tenant_id, model_provider=\n provider, model_name='text-embedding-ada-002')\n", (1397, 1485), False, 'from core.llm.llm_builder import LLMBuilder\n'), ((1836, 1893), 'core.llm.llm_builder.LLMBuilder.to_llm', 'LLMBuilder.to_llm', ([], {'tenant_id': 'tenant_id', 'model_name': '"""fake"""'}), "(tenant_id=tenant_id, model_name='fake')\n", (1853, 1893), False, 'from core.llm.llm_builder import LLMBuilder\n'), ((554, 581), 'core.callback_handler.std_out_callback_handler.DifyStdOutCallbackHandler', 'DifyStdOutCallbackHandler', ([], {}), '()\n', (579, 581), False, 'from core.callback_handler.std_out_callback_handler import DifyStdOutCallbackHandler\n'), ((1679, 1715), 'core.embedding.openai_embedding.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '(**model_credentials)\n', (1694, 1715), False, 'from core.embedding.openai_embedding import OpenAIEmbedding\n'), ((2000, 2021), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (2012, 2021), False, 'from llama_index import ServiceContext, PromptHelper, LLMPredictor\n'), ((2047, 2064), 'core.embedding.openai_embedding.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (2062, 2064), False, 'from core.embedding.openai_embedding import OpenAIEmbedding\n')] |
#main.py
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.embeddings import resolve_embed_model
from llama_index.llms import OpenAI
documents = SimpleDirectoryReader("data-qas").load_data()
embed_model = resolve_embed_model("local:BAAI/bge-small-en-v1.5")
llm = OpenAI(temperature=0.7, api_base="http://localhost:1234/v1", api_key="not-needed")
service_context = ServiceContext.from_defaults(
embed_model=embed_model, llm=llm
)
index = VectorStoreIndex.from_documents(
documents, service_context=service_context
)
query_engine = index.as_query_engine()
response = query_engine.query("Make 20 question-answer paris from the information provided. Focus on various types of cancers")
print(response)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.embeddings.resolve_embed_model",
"llama_index.SimpleDirectoryReader",
"llama_index.llms.OpenAI"
] | [((254, 305), 'llama_index.embeddings.resolve_embed_model', 'resolve_embed_model', (['"""local:BAAI/bge-small-en-v1.5"""'], {}), "('local:BAAI/bge-small-en-v1.5')\n", (273, 305), False, 'from llama_index.embeddings import resolve_embed_model\n'), ((313, 400), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.7)', 'api_base': '"""http://localhost:1234/v1"""', 'api_key': '"""not-needed"""'}), "(temperature=0.7, api_base='http://localhost:1234/v1', api_key=\n 'not-needed')\n", (319, 400), False, 'from llama_index.llms import OpenAI\n'), ((415, 477), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model', 'llm': 'llm'}), '(embed_model=embed_model, llm=llm)\n', (443, 477), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((493, 568), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (524, 568), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((193, 226), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data-qas"""'], {}), "('data-qas')\n", (214, 226), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n')] |
"""Langchain memory wrapper (for LlamaIndex)."""
from typing import Any, Dict, List, Optional
from llama_index.core.bridge.langchain import (
AIMessage,
BaseChatMemory,
BaseMessage,
HumanMessage,
)
from llama_index.core.bridge.langchain import BaseMemory as Memory
from llama_index.core.bridge.pydantic import Field
from llama_index.core.indices.base import BaseIndex
from llama_index.core.schema import Document
from llama_index.core.utils import get_new_id
def get_prompt_input_key(inputs: Dict[str, Any], memory_variables: List[str]) -> str:
"""Get prompt input key.
Copied over from langchain.
"""
# "stop" is a special key that can be passed as input but is not used to
# format the prompt.
prompt_input_keys = list(set(inputs).difference([*memory_variables, "stop"]))
if len(prompt_input_keys) != 1:
raise ValueError(f"One input key expected got {prompt_input_keys}")
return prompt_input_keys[0]
class GPTIndexMemory(Memory):
"""Langchain memory wrapper (for LlamaIndex).
Args:
human_prefix (str): Prefix for human input. Defaults to "Human".
ai_prefix (str): Prefix for AI output. Defaults to "AI".
memory_key (str): Key for memory. Defaults to "history".
index (BaseIndex): LlamaIndex instance.
query_kwargs (Dict[str, Any]): Keyword arguments for LlamaIndex query.
input_key (Optional[str]): Input key. Defaults to None.
output_key (Optional[str]): Output key. Defaults to None.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history"
index: BaseIndex
query_kwargs: Dict = Field(default_factory=dict)
output_key: Optional[str] = None
input_key: Optional[str] = None
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
return prompt_input_key
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return key-value pairs given the text input to the chain."""
prompt_input_key = self._get_prompt_input_key(inputs)
query_str = inputs[prompt_input_key]
# TODO: wrap in prompt
# TODO: add option to return the raw text
# NOTE: currently it's a hack
query_engine = self.index.as_query_engine(**self.query_kwargs)
response = query_engine.query(query_str)
return {self.memory_key: str(response)}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save the context of this model run to memory."""
prompt_input_key = self._get_prompt_input_key(inputs)
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = next(iter(outputs.keys()))
else:
output_key = self.output_key
human = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai = f"{self.ai_prefix}: " + outputs[output_key]
doc_text = f"{human}\n{ai}"
doc = Document(text=doc_text)
self.index.insert(doc)
def clear(self) -> None:
"""Clear memory contents."""
def __repr__(self) -> str:
"""Return representation."""
return "GPTIndexMemory()"
class GPTIndexChatMemory(BaseChatMemory):
"""Langchain chat memory wrapper (for LlamaIndex).
Args:
human_prefix (str): Prefix for human input. Defaults to "Human".
ai_prefix (str): Prefix for AI output. Defaults to "AI".
memory_key (str): Key for memory. Defaults to "history".
index (BaseIndex): LlamaIndex instance.
query_kwargs (Dict[str, Any]): Keyword arguments for LlamaIndex query.
input_key (Optional[str]): Input key. Defaults to None.
output_key (Optional[str]): Output key. Defaults to None.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history"
index: BaseIndex
query_kwargs: Dict = Field(default_factory=dict)
output_key: Optional[str] = None
input_key: Optional[str] = None
return_source: bool = False
id_to_message: Dict[str, BaseMessage] = Field(default_factory=dict)
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
return prompt_input_key
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return key-value pairs given the text input to the chain."""
prompt_input_key = self._get_prompt_input_key(inputs)
query_str = inputs[prompt_input_key]
query_engine = self.index.as_query_engine(**self.query_kwargs)
response_obj = query_engine.query(query_str)
if self.return_source:
source_nodes = response_obj.source_nodes
if self.return_messages:
# get source messages from ids
source_ids = [sn.node.node_id for sn in source_nodes]
source_messages = [
m for id, m in self.id_to_message.items() if id in source_ids
]
# NOTE: type List[BaseMessage]
response: Any = source_messages
else:
source_texts = [sn.node.get_content() for sn in source_nodes]
response = "\n\n".join(source_texts)
else:
response = str(response_obj)
return {self.memory_key: response}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save the context of this model run to memory."""
prompt_input_key = self._get_prompt_input_key(inputs)
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = next(iter(outputs.keys()))
else:
output_key = self.output_key
# a bit different than existing langchain implementation
# because we want to track id's for messages
human_message = HumanMessage(content=inputs[prompt_input_key])
human_message_id = get_new_id(set(self.id_to_message.keys()))
ai_message = AIMessage(content=outputs[output_key])
ai_message_id = get_new_id(
set(self.id_to_message.keys()).union({human_message_id})
)
self.chat_memory.messages.append(human_message)
self.chat_memory.messages.append(ai_message)
self.id_to_message[human_message_id] = human_message
self.id_to_message[ai_message_id] = ai_message
human_txt = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai_txt = f"{self.ai_prefix}: " + outputs[output_key]
human_doc = Document(text=human_txt, id_=human_message_id)
ai_doc = Document(text=ai_txt, id_=ai_message_id)
self.index.insert(human_doc)
self.index.insert(ai_doc)
def clear(self) -> None:
"""Clear memory contents."""
def __repr__(self) -> str:
"""Return representation."""
return "GPTIndexMemory()"
| [
"llama_index.core.bridge.langchain.AIMessage",
"llama_index.core.bridge.langchain.HumanMessage",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.schema.Document"
] | [((1663, 1690), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1668, 1690), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((4306, 4333), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (4311, 4333), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((4484, 4511), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (4489, 4511), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((3365, 3388), 'llama_index.core.schema.Document', 'Document', ([], {'text': 'doc_text'}), '(text=doc_text)\n', (3373, 3388), False, 'from llama_index.core.schema import Document\n'), ((6634, 6680), 'llama_index.core.bridge.langchain.HumanMessage', 'HumanMessage', ([], {'content': 'inputs[prompt_input_key]'}), '(content=inputs[prompt_input_key])\n', (6646, 6680), False, 'from llama_index.core.bridge.langchain import AIMessage, BaseChatMemory, BaseMessage, HumanMessage\n'), ((6772, 6810), 'llama_index.core.bridge.langchain.AIMessage', 'AIMessage', ([], {'content': 'outputs[output_key]'}), '(content=outputs[output_key])\n', (6781, 6810), False, 'from llama_index.core.bridge.langchain import AIMessage, BaseChatMemory, BaseMessage, HumanMessage\n'), ((7307, 7353), 'llama_index.core.schema.Document', 'Document', ([], {'text': 'human_txt', 'id_': 'human_message_id'}), '(text=human_txt, id_=human_message_id)\n', (7315, 7353), False, 'from llama_index.core.schema import Document\n'), ((7371, 7411), 'llama_index.core.schema.Document', 'Document', ([], {'text': 'ai_txt', 'id_': 'ai_message_id'}), '(text=ai_txt, id_=ai_message_id)\n', (7379, 7411), False, 'from llama_index.core.schema import Document\n')] |
import matplotlib.pyplot as plt
import polars as pl
import seaborn as sns
import torch
from llama_index.evaluation import RelevancyEvaluator
from llama_index.llms import HuggingFaceLLM
from llama_index.prompts import PromptTemplate
from tqdm import tqdm
from transformers import BitsAndBytesConfig
from src.common.utils import Settings
from src.model import LlamaIndexModel
pl.Config.set_tbl_formatting("NOTHING")
pl.Config.set_tbl_rows(4)
settings = Settings().model.model_dump()
settings["top_k"] = 5 # reduce eval time
model = LlamaIndexModel(**settings, load_model=True)
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_quant_type="nf4",
bnb_4bit_use_double_quant=True,
)
model.model = HuggingFaceLLM(
model_name="mistralai/Mistral-7B-Instruct-v0.1",
tokenizer_name="mistralai/Mistral-7B-Instruct-v0.1",
query_wrapper_prompt=PromptTemplate("<s>[INST] {query_str} [/INST] </s>\n"),
context_window=3900,
max_new_tokens=256,
model_kwargs={"quantization_config": quantization_config},
generate_kwargs={"temperature": 0.2, "top_k": 5, "top_p": 0.95},
device_map="auto",
)
model.build_index()
past_queries = (
pl.read_csv("data/logs/queries.csv").filter(pl.col("column") != "").head(100)
)
fails = ["supercars"] # these cases should always output 'false'
queries = [
"social mobility",
"mobility",
"diabetes",
"health",
"liverpool",
"london",
"covid",
"greenspace",
] + fails
queries.extend([f"{query} datasets" for query in queries])
queries.extend([f"datasets relating to {query}" for query in queries])
queries.extend(past_queries["column"].to_list())
alpha_values = [0.0, 0.75, 1.0]
results = []
for alpha in tqdm(alpha_values):
for query in tqdm(queries):
query
model.alpha = alpha
model.run(query)
evaluator = RelevancyEvaluator(service_context=model.service_context)
contexts = [node.get_content() for node in model.response]
eval_result = evaluator.evaluate(
query=query,
contexts=contexts,
response="",
)
results.append({"result": eval_result.passing, "alpha": alpha, "query": query})
df = pl.DataFrame(results).with_columns(
pl.col("alpha").cast(str), pl.col("result").cast(str)
)
df.write_csv("data/evaluation/evaluation.csv")
df = pl.read_csv("data/evaluation/evaluation.csv").with_columns(
pl.col("alpha").cast(str), pl.col("result").cast(str)
)
sns.histplot(
data=df,
x="alpha",
hue="result",
multiple="stack",
shrink=0.8,
palette="gray",
)
plt.save("./data/evaluation/plot.png")
| [
"llama_index.evaluation.RelevancyEvaluator",
"llama_index.prompts.PromptTemplate"
] | [((376, 415), 'polars.Config.set_tbl_formatting', 'pl.Config.set_tbl_formatting', (['"""NOTHING"""'], {}), "('NOTHING')\n", (404, 415), True, 'import polars as pl\n'), ((416, 441), 'polars.Config.set_tbl_rows', 'pl.Config.set_tbl_rows', (['(4)'], {}), '(4)\n', (438, 441), True, 'import polars as pl\n'), ((535, 579), 'src.model.LlamaIndexModel', 'LlamaIndexModel', ([], {'load_model': '(True)'}), '(**settings, load_model=True)\n', (550, 579), False, 'from src.model import LlamaIndexModel\n'), ((604, 742), 'transformers.BitsAndBytesConfig', 'BitsAndBytesConfig', ([], {'load_in_4bit': '(True)', 'bnb_4bit_compute_dtype': 'torch.float16', 'bnb_4bit_quant_type': '"""nf4"""', 'bnb_4bit_use_double_quant': '(True)'}), "(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True)\n", (622, 742), False, 'from transformers import BitsAndBytesConfig\n'), ((1766, 1784), 'tqdm.tqdm', 'tqdm', (['alpha_values'], {}), '(alpha_values)\n', (1770, 1784), False, 'from tqdm import tqdm\n'), ((2526, 2622), 'seaborn.histplot', 'sns.histplot', ([], {'data': 'df', 'x': '"""alpha"""', 'hue': '"""result"""', 'multiple': '"""stack"""', 'shrink': '(0.8)', 'palette': '"""gray"""'}), "(data=df, x='alpha', hue='result', multiple='stack', shrink=0.8,\n palette='gray')\n", (2538, 2622), True, 'import seaborn as sns\n'), ((2646, 2684), 'matplotlib.pyplot.save', 'plt.save', (['"""./data/evaluation/plot.png"""'], {}), "('./data/evaluation/plot.png')\n", (2654, 2684), True, 'import matplotlib.pyplot as plt\n'), ((1803, 1816), 'tqdm.tqdm', 'tqdm', (['queries'], {}), '(queries)\n', (1807, 1816), False, 'from tqdm import tqdm\n'), ((923, 977), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['"""<s>[INST] {query_str} [/INST] </s>\n"""'], {}), "('<s>[INST] {query_str} [/INST] </s>\\n')\n", (937, 977), False, 'from llama_index.prompts import PromptTemplate\n'), ((1905, 1962), 'llama_index.evaluation.RelevancyEvaluator', 'RelevancyEvaluator', ([], {'service_context': 'model.service_context'}), '(service_context=model.service_context)\n', (1923, 1962), False, 'from llama_index.evaluation import RelevancyEvaluator\n'), ((2257, 2278), 'polars.DataFrame', 'pl.DataFrame', (['results'], {}), '(results)\n', (2269, 2278), True, 'import polars as pl\n'), ((2405, 2450), 'polars.read_csv', 'pl.read_csv', (['"""data/evaluation/evaluation.csv"""'], {}), "('data/evaluation/evaluation.csv')\n", (2416, 2450), True, 'import polars as pl\n'), ((454, 464), 'src.common.utils.Settings', 'Settings', ([], {}), '()\n', (462, 464), False, 'from src.common.utils import Settings\n'), ((2297, 2312), 'polars.col', 'pl.col', (['"""alpha"""'], {}), "('alpha')\n", (2303, 2312), True, 'import polars as pl\n'), ((2324, 2340), 'polars.col', 'pl.col', (['"""result"""'], {}), "('result')\n", (2330, 2340), True, 'import polars as pl\n'), ((2469, 2484), 'polars.col', 'pl.col', (['"""alpha"""'], {}), "('alpha')\n", (2475, 2484), True, 'import polars as pl\n'), ((2496, 2512), 'polars.col', 'pl.col', (['"""result"""'], {}), "('result')\n", (2502, 2512), True, 'import polars as pl\n'), ((1228, 1264), 'polars.read_csv', 'pl.read_csv', (['"""data/logs/queries.csv"""'], {}), "('data/logs/queries.csv')\n", (1239, 1264), True, 'import polars as pl\n'), ((1272, 1288), 'polars.col', 'pl.col', (['"""column"""'], {}), "('column')\n", (1278, 1288), True, 'import polars as pl\n')] |
import uuid
from llama_index import (StorageContext, VectorStoreIndex, download_loader,
load_index_from_storage)
from llama_index.memory import ChatMemoryBuffer
def create_index_and_query(transcript_id: str, full_transcription: any):
persist_dir = f'./storage/cache/transcription/{transcript_id}'
try:
storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
index = load_index_from_storage(storage_context)
print('loading from disk')
except:
JsonDataReader = download_loader("JsonDataReader")
loader = JsonDataReader()
documents = loader.load_data(full_transcription)
index = VectorStoreIndex.from_documents(documents)
index.storage_context.persist(persist_dir=persist_dir)
print('creating on disk')
return index
def create_chat_engine(indexStorage: any):
global chat_engines
chat_id = str(uuid.uuid4())
memory = ChatMemoryBuffer.from_defaults(token_limit=2000)
chat_engine = indexStorage.as_chat_engine(
chat_mode="context",
memory=memory,
system_prompt=(
"You are a chatbot, able to have normal interactions, as well as talk"
# " about an essay discussing Paul Grahams life."
),
)
chat_engines[chat_id] = chat_engine
return chat_id
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.StorageContext.from_defaults",
"llama_index.download_loader",
"llama_index.load_index_from_storage",
"llama_index.memory.ChatMemoryBuffer.from_defaults"
] | [((963, 1011), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(2000)'}), '(token_limit=2000)\n', (993, 1011), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((365, 418), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (393, 418), False, 'from llama_index import StorageContext, VectorStoreIndex, download_loader, load_index_from_storage\n'), ((435, 475), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (458, 475), False, 'from llama_index import StorageContext, VectorStoreIndex, download_loader, load_index_from_storage\n'), ((934, 946), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (944, 946), False, 'import uuid\n'), ((548, 581), 'llama_index.download_loader', 'download_loader', (['"""JsonDataReader"""'], {}), "('JsonDataReader')\n", (563, 581), False, 'from llama_index import StorageContext, VectorStoreIndex, download_loader, load_index_from_storage\n'), ((689, 731), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (720, 731), False, 'from llama_index import StorageContext, VectorStoreIndex, download_loader, load_index_from_storage\n')] |
import glob
import os
import re
from PIL import Image
from io import BytesIO
from openai import OpenAI
from llama_index.node_parser import MarkdownNodeParser
from llama_index import ServiceContext, VectorStoreIndex, SimpleDirectoryReader
from llama_index.embeddings import OpenAIEmbedding
from llama_index import download_loader
from llama_index.indices.multi_modal.base import MultiModalVectorStoreIndex
from pathlib import Path
import requests
parser = MarkdownNodeParser(include_metadata=True, include_prev_next_rel=True)
client = OpenAI(
api_key=os.environ["OPENAI_API_KEY"]
)
class HybridIndex():
def __init__(self, markdown_file):
MarkdownReader = download_loader("MarkdownReader")
loader = MarkdownReader()
documents = loader.load_data(file=Path(markdown_file))
embed_model = OpenAIEmbedding()
ServiceContext.from_defaults(embed_model=embed_model)
index = VectorStoreIndex.from_documents(documents)
self.text_retriever = index.as_retriever(similarity_top_k=3)
def retrieve_text(self, text):
return "\n\n".join([
self.text_retriever.retrieve(text)[k].get_content()
for k in range(3)
])
class HybridIndex2():
def __init__(self, markdown_file, savedir):
self.setup_text_retriever(markdown_file)
self.setup_img_retriever(markdown_file, savedir)
def setup_img_retriever(self, markdown_file, savedir):
image_dir = os.path.join(savedir, 'images')
with open(markdown_file, 'r') as file:
text = file.read()
images = re.findall(r"<img src=\"([^\s-]*)\"", text)
print("images", images)
idx = 0
for image in images:
response = requests.get(image)
img = Image.open(BytesIO(response.content))
os.makedirs(image_dir, exist_ok=True)
img.save(os.path.join(image_dir, f"{idx}.png"))
idx += 1
glob.glob(os.path.join(savedir, '*.png'))
documents = SimpleDirectoryReader(image_dir).load_data()
index = MultiModalVectorStoreIndex.from_documents(documents)
self.image_retriever = index.as_retriever()
def setup_text_retriever(self, markdown_file):
MarkdownReader = download_loader("MarkdownReader")
loader = MarkdownReader()
documents = loader.load_data(file=Path(markdown_file))
embed_model = OpenAIEmbedding()
ServiceContext.from_defaults(embed_model=embed_model)
text_index = VectorStoreIndex.from_documents(documents)
self.text_retriever = text_index.as_retriever(similarity_top_k=3)
def retrieve_text(self, text, topk=3):
return "\n\n".join([
self.text_retriever.retrieve(text)[k].get_content()
for k in range(3)
])
def retrieve_img(self, text, topk=1):
return self.image_retriever.retrieve(text)[0].to_dict()['node']['metadata']['file_path']
TEXT_INDEX = HybridIndex2(
markdown_file="/Users/neel/Desktop/rasa-hackathon/data/reference_text.md",
savedir="/Users/neel/Desktop/rasa-hackathon/data"
)
SYSTEM_PROMPT = """\
You are an intelligent digital assistant working with a user who is preparing a presentation. They are iteratively using you to make calls to a retriever information to use in their presentation. You also take the retrieved information and synthesize that information with their text to make calls the frontend API to navigate between and create slides for the user. Your task is to interpret the user's intent and use the given tools as needed to accomplish the task."""
USER_PROMPT = """\
The user said "{user_text}"
Given the above user text, call the right tool for this task.
If you are using update_markdown_slide without providing an image, DO NOT attempt to include an image URL - remove it if needed.
When in doubt, choose the update_markdown_slide tool.
"""
def choose_tool(whisper_prompt):
completion = client.chat.completions.create(
model="gpt-4-1106-preview",
max_tokens=1000,
messages=[
{"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": USER_PROMPT.format(user_text=whisper_prompt)}
],
temperature=0,
tools=[
{
"type": "function",
"function": {
"name": "add_slide",
"description": "Choose this tool to add a new blank slide only if asked to.",
}
},
{
"type": "function",
"function": {
"name": "choose_slide",
"description": "This is a tool that can choose a slide.",
"parameters": {
"type": "object",
"title": "SlideInputs",
"required": ["index"],
"properties": {
"index": {
"type": "integer",
"title": "index",
"description": "Slide to choose"
}
}
},
}
},
{
"type": "function",
"function": {
"name": "update_markdown_slide",
"description": "This is a tool that can update a markdown slide.",
"parameters": {
"type": "object",
"title": "MarkdownSlideInput",
"required": ["query"],
"properties": {
"query": {
"type": "string",
"title": "Query",
"description": "The query to generate the slide from"
},
"provide_image": {
"type": "boolean",
"title": "Should provide an image to fulfill the request",
"description": "Choose True if you want to provide an image to fullfill the request"
},
}
},
}
},
]
)
return completion.choices[0].message.tool_calls[0]
def get_image(image_prompt):
return TEXT_INDEX.retrieve_img(image_prompt)
def make_slide(whisper_prompt, provide_image):
if provide_image:
return {'image': get_image(whisper_prompt), 'slide_index': 0}
return {'markdown': generate_markdown(whisper_prompt), 'slide_index': 0}
GENERATE_MD_PROMPT = """\
Your task is to generate a markdown slide. The markdown you generate always starts with a title. This is an example.
# Slide 1
This is some text
## This is a subheading
- This is a list
- This is a list
- This is a list
### This is a subsubheading
1. This is an ordered list
2. This is an ordered list
Now do this by synthesizing the following context with the prompt:
This is the context:
---
{context}
---
This is the prompt:
---
{whisper_prompt}
---\
"""
FEEDBACK_PROMPT = """
Here is what you have done so far:
{response}
Tell me what you have done so far and ask what should be done next.
"""
def generate_feedback(response):
completion = client.chat.completions.create(
model="gpt-4-1106-preview",
max_tokens=1000,
messages=[
{"role": "system", "content": """You are a AI assistant responder."""},
{"role": "user", "content": FEEDBACK_PROMPT.format(response=response)}
],
temperature=0,
)
response = completion.choices[0].message.content
return response
def generate_markdown(whisper_prompt):
context = TEXT_INDEX.retrieve_text(whisper_prompt)
completion = client.chat.completions.create(
model="gpt-4-1106-preview",
max_tokens=1000,
messages=[
{"role": "system", "content": """You are a markdown slides generation pro."""},
{"role": "user", "content": GENERATE_MD_PROMPT.format(context=context, whisper_prompt=whisper_prompt)}
],
temperature=0,
tools=[
{
"type": "function",
"function": {
"name": "make_markdown_slide",
"description": "This is a tool that can make a markdown slide.",
"parameters": {
"type": "object",
"title": "MarkdownSlideInput",
"required": ["markdown"],
"properties": {
"markdown": {
"type": "string",
"title": "Markdown",
"description": "The markdown for the slide"
}
}
},
}
},
]
)
return eval(completion.choices[0].message.tool_calls[0].function.arguments)['markdown']
def main():
#res = process_whisper_prompt("Add a title to the slide 'Hello World'")
res = generate_markdown("Let's get the founding story")
print(res)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.node_parser.MarkdownNodeParser",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.download_loader",
"llama_index.indices.multi_modal.base.MultiModalVectorStoreIndex.from_documents",
"llama_index.embeddings.OpenAIEmbedding"
] | [((455, 524), 'llama_index.node_parser.MarkdownNodeParser', 'MarkdownNodeParser', ([], {'include_metadata': '(True)', 'include_prev_next_rel': '(True)'}), '(include_metadata=True, include_prev_next_rel=True)\n', (473, 524), False, 'from llama_index.node_parser import MarkdownNodeParser\n'), ((535, 579), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "os.environ['OPENAI_API_KEY']"}), "(api_key=os.environ['OPENAI_API_KEY'])\n", (541, 579), False, 'from openai import OpenAI\n'), ((682, 715), 'llama_index.download_loader', 'download_loader', (['"""MarkdownReader"""'], {}), "('MarkdownReader')\n", (697, 715), False, 'from llama_index import download_loader\n'), ((835, 852), 'llama_index.embeddings.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (850, 852), False, 'from llama_index.embeddings import OpenAIEmbedding\n'), ((861, 914), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (889, 914), False, 'from llama_index import ServiceContext, VectorStoreIndex, SimpleDirectoryReader\n'), ((931, 973), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (962, 973), False, 'from llama_index import ServiceContext, VectorStoreIndex, SimpleDirectoryReader\n'), ((1492, 1523), 'os.path.join', 'os.path.join', (['savedir', '"""images"""'], {}), "(savedir, 'images')\n", (1504, 1523), False, 'import os\n'), ((2349, 2382), 'llama_index.download_loader', 'download_loader', (['"""MarkdownReader"""'], {}), "('MarkdownReader')\n", (2364, 2382), False, 'from llama_index import download_loader\n'), ((2502, 2519), 'llama_index.embeddings.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (2517, 2519), False, 'from llama_index.embeddings import OpenAIEmbedding\n'), ((2528, 2581), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (2556, 2581), False, 'from llama_index import ServiceContext, VectorStoreIndex, SimpleDirectoryReader\n'), ((2603, 2645), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2634, 2645), False, 'from llama_index import ServiceContext, VectorStoreIndex, SimpleDirectoryReader\n'), ((1623, 1668), 're.findall', 're.findall', (['"""<img src=\\\\"([^\\\\s-]*)\\\\\\""""', 'text'], {}), '(\'<img src=\\\\"([^\\\\s-]*)\\\\"\', text)\n', (1633, 1668), False, 'import re\n'), ((2163, 2215), 'llama_index.indices.multi_modal.base.MultiModalVectorStoreIndex.from_documents', 'MultiModalVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2204, 2215), False, 'from llama_index.indices.multi_modal.base import MultiModalVectorStoreIndex\n'), ((792, 811), 'pathlib.Path', 'Path', (['markdown_file'], {}), '(markdown_file)\n', (796, 811), False, 'from pathlib import Path\n'), ((1783, 1802), 'requests.get', 'requests.get', (['image'], {}), '(image)\n', (1795, 1802), False, 'import requests\n'), ((1879, 1916), 'os.makedirs', 'os.makedirs', (['image_dir'], {'exist_ok': '(True)'}), '(image_dir, exist_ok=True)\n', (1890, 1916), False, 'import os\n'), ((2029, 2059), 'os.path.join', 'os.path.join', (['savedir', '"""*.png"""'], {}), "(savedir, '*.png')\n", (2041, 2059), False, 'import os\n'), ((2459, 2478), 'pathlib.Path', 'Path', (['markdown_file'], {}), '(markdown_file)\n', (2463, 2478), False, 'from pathlib import Path\n'), ((1836, 1861), 'io.BytesIO', 'BytesIO', (['response.content'], {}), '(response.content)\n', (1843, 1861), False, 'from io import BytesIO\n'), ((1942, 1979), 'os.path.join', 'os.path.join', (['image_dir', 'f"""{idx}.png"""'], {}), "(image_dir, f'{idx}.png')\n", (1954, 1979), False, 'import os\n'), ((2085, 2117), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['image_dir'], {}), '(image_dir)\n', (2106, 2117), False, 'from llama_index import ServiceContext, VectorStoreIndex, SimpleDirectoryReader\n')] |
import streamlit as st
import redirect as rd
import os
import tempfile
import time
from llama_index import StorageContext, LLMPredictor
from llama_index import TreeIndex, load_index_from_storage
from llama_index import ServiceContext
from langchain.prompts import StringPromptTemplate
from typing import List, Union
from langchain.schema import AgentAction, AgentFinish
from langchain.agents import AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langchain import LLMChain, OpenAI
from llama_index.indices.tree.tree_root_retriever import TreeRootRetriever
import re
from langchain.chat_models import ChatOpenAI
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index.query_engine import MultiStepQueryEngine
from langchain.agents import Tool
from llama_index.query_engine import RetrieverQueryEngine
import openai
# import nest_asyncio
# nest_asyncio.apply()
def call_openai_api(*args, **kwargs):
return openai.ChatCompletion.create(*args, **kwargs)
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
openai.api_key = st.secrets["OPENAI_API_KEY"]
query_engine_tools = []
import asyncio
def get_or_create_eventloop():
try:
return asyncio.get_event_loop()
except RuntimeError as ex:
if "There is no current event loop in thread" in str(ex):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return asyncio.get_event_loop()
def remove_formatting(output):
output = re.sub('\[[0-9;m]+', '', output)
output = re.sub('\', '', output)
return output.strip()
@st.cache_resource
def preprocessing():
names = ["The Insurance Act, 1938: Regulations and Restrictions for Insurance Companies in India"]
names.append('Overview of Pradhan Mantri Beema Yojana')
names.append('Restructured Weather Based Crop Insurance and Coconut Palm Insurance Schemes')
names.append('Unified Package Insurance Scheme: Financial Protection for Agriculture Sector')
descriptions = ["The go-to document for Insurance Rules. The Insurance Act, 1938 is an Act to consolidate and amend the law relating to the business of insurance in India. It outlines the regulations for insurance companies, including registration, capital requirements, investment, loans and management, investigation, appointment of staff, control over management, amalgamation and transfer of insurance business, commission and rebates, licensing of agents, management by administrator, and acquisition of the undertakings of insurers in certain cases. It also outlines the voting rights of shareholders, the requirements for making a declaration of interest in a share held in the name of another person, the requirements for the separation of accounts and funds for different classes of insurance business, the audit and actuarial report and abstract that must be conducted annually, the power of the Authority to order revaluation and to inspect returns, the power of the Authority to make rules and regulations, the power of the Authority to remove managerial persons from office, appoint additional directors, and issue directions regarding re-insurance treaties, the power of the Authority to enter and search any building or place where books, accounts, or other documents relating to any claim, rebate, or commission are kept, the prohibition of cessation of payments of commission, the prohibition of offering of rebates as an inducement to take out or renew an insurance policy, the process for issuing a registration to act as an intermediary or insurance intermediary, the process for repudiating a life insurance policy on the ground of fraud, the prohibition of insurance agents, intermediaries, or insurance intermediaries to be or remain a director in an insurance company, the requirement to give notice to the policy-holder informing them of the options available to them on the lapsing of a policy, and the power of the National Company Law Tribunal to order the winding up of an insurance company. Penalties for non-compliance range from fines to imprisonment. The Act also outlines the formation of the Life Insurance Council and General Insurance Council, and the Executive Committees of each, the Tariff Advisory Committee, and the obligations of insurers in respect of rural or social or unorganized sector and backward classes."]
descriptions.append("Pradhan Mantri Beema Yojana is a scheme implemented by the Government of India to provide insurance coverage and financial support to farmers in the event of crop failure due to natural calamities, pests & diseases. The scheme covers all crops for which past yield data is available, and risk coverage includes yield losses, prevented sowing, post-harvest losses, and localized calamities. It also offers coverage for personal assets of the farmer, such as dwellings and its contents, and other assets that help the farmer earn a livelihood, such as agricultural pump sets and tractors. The scheme includes seven sections, with crop insurance being mandatory, and the farmer's share of the premium ranges from to 5%. It also includes a Weather Based Crop Insurance Scheme, a Unified Package Insurance Scheme, and a centralized repository. In addition, it offers personal accident insurance, student safety insurance, and life insurance.")
descriptions.append("This document outlines the Restructured Weather Based Crop Insurance Scheme (RWBCIS) and Coconut Palm Insurance Scheme (CPIS). The RWBCIS includes operational guidelines and administrative approval issued by the Department of Agriculture, Cooperation and Farmers Welfare (DAC & FW) and the State Government. The CPIS includes operational guidelines issued by the DAC & FW. The scheme covers food crops (cereals, millets, and pulses), oilseeds, and commercial/horticultural crops. The risk period for the scheme is from sowing period to maturity of the crop and is notified by the State Level Crop Cutting and Insurance Committee (SLCCCI). The scheme requires notification from the State/UT Government, which must include details of crops and reference unit areas, applicable sum insured, premium rates, and subsidy. Claims are assessed based on weather data recorded by the notified Reference Weather Stations (RWS) or Back-up Weather Stations (BWS). The scheme also includes a Term Sheet, which outlines the cover phases, strike and exit values, standard loss rates, and policy limits.")
descriptions.append("The Unified Package Insurance Scheme (UPIS) is a financial protection program for citizens associated with the agriculture sector, implemented in 45 selected districts on a pilot basis from Kharif 2016 season. Eligibility for the scheme includes savings bank account holders aged between 18 and 50 years, with an assurance of Rs. 2,00,000 on death of the insured member. The policy provides comprehensive cover for agriculture tractors of up to 10 years and 45 HP, and third party cover with no age limit. In the event of damage, farmers must intimate the insurance company within 48 hours and submit the claim form and other relevant documents within 15 days of the survey. The policy excludes any accidental loss or damage outside the geographical area, any claim arising out of any contractual liability, and any loss or damage caused by depreciation or wear and tear.")
temp = ['insurance', 'pmby', 'rwbcis', 'upis']
for n, x in enumerate(temp):
storage_context = StorageContext.from_defaults(
persist_dir = x,
)
index = load_index_from_storage(storage_context)
engine = index.as_query_engine(similarity_top_k = 3)
query_engine_tools.append(QueryEngineTool(
query_engine = engine,
metadata = ToolMetadata(name = names[n], description = descriptions[n])
))
st.header('Document Headings and Descriptions -')
for i in range(4):
st.subheader(f"{i + 1}) " + names[i])
st.write(descriptions[i])
s_engine = MultiStepQueryEngine.from_defaults(query_engine_tools = query_engine_tools)
tools = [Tool(
name = "Llama-Index",
func = s_engine.query,
description = f"Useful for when you want to answer questions. The input to this tool should be a complete English sentence. Works best if you redirect the entire query back into this. This is an AI Assistant, ask complete questions, articulate well.",
return_direct = True
)
]
template1 = """You are a Smart Insurance Agent Assistant. The Agent will ask you domain specific questions. The tools provided to you have smart interpretibility if you specify keywords in your query to the tool [Example a query for two wheeler insurance rules should mention two wheelers]. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action, a complete English sentence
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin! Remember to be ethical and articulate when giving your final answer. Use lots of "Arg"s
Question: {input}
{agent_scratchpad}"""
prompt = CustomPromptTemplate(
template = template1,
tools = tools,
input_variables=["input", "intermediate_steps"]
)
output_parser = CustomOutputParser()
llm = OpenAI(temperature = 0)
llm_chain = LLMChain(llm = llm, prompt = prompt)
tool_names = [tool.name for tool in tools]
agent = LLMSingleActionAgent(
llm_chain = llm_chain,
output_parser = output_parser,
stop = ["\nObservation:"],
allowed_tools = tool_names
)
agent_chain = AgentExecutor.from_agent_and_tools(tools = tools, agent = agent, verbose = True)
return agent_chain
@st.cache_resource
def run(query):
if query:
with rd.stdout() as out:
ox = agent_chain.run(query)
output = out.getvalue()
output = remove_formatting(output)
st.write(ox.response)
return True
class CustomPromptTemplate(StringPromptTemplate):
template: str
tools: List[Tool]
def format(self, **kwargs) -> str:
intermediate_steps = kwargs.pop("intermediate_steps")
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
kwargs["agent_scratchpad"] = thoughts
kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools])
kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
return self.template.format(**kwargs)
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
if "Final Answer:" in llm_output:
return AgentFinish(
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
st.set_page_config(layout = "wide")
st.title("Agriculture Web App")
# st.markdown('_The headings and descriptions given below are generated using LLMs._')
llm_predictor = LLMPredictor(llm = ChatOpenAI(temperature = 0, model_name = 'gpt-3.5-turbo', max_tokens = -1))
storage_context = StorageContext.from_defaults()
service_context = ServiceContext.from_defaults(llm_predictor = llm_predictor)
agent_chain = preprocessing()
ack = False
if agent_chain:
query = st.text_input('Enter your Query.', key = 'query_input')
ack = run(query)
if ack:
ack = False
query = st.text_input('Enter your Query.', key = 'new_query_input')
ack = run(query)
if ack:
ack = False
query = st.text_input('Enter your Query.', key = 'new_query_input1')
ack = run(query)
if ack:
ack = False
query = st.text_input('Enter your Query.', key = 'new_query_input2')
ack = run(query)
if ack:
ack = False
query = st.text_input('Enter your Query.', key = 'new_query_input3')
ack = run(query)
if ack:
ack = False
query = st.text_input('Enter your Query.', key = 'new_query_input4')
ack = run(query)
if ack:
ack = False
query = st.text_input('Enter your Query.', key = 'new_query_input5')
ack = run(query)
| [
"llama_index.tools.ToolMetadata",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.query_engine.MultiStepQueryEngine.from_defaults",
"llama_index.load_index_from_storage"
] | [((11873, 11906), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""'}), "(layout='wide')\n", (11891, 11906), True, 'import streamlit as st\n'), ((11910, 11941), 'streamlit.title', 'st.title', (['"""Agriculture Web App"""'], {}), "('Agriculture Web App')\n", (11918, 11941), True, 'import streamlit as st\n'), ((12160, 12190), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (12188, 12190), False, 'from llama_index import StorageContext, LLMPredictor\n'), ((12209, 12266), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (12237, 12266), False, 'from llama_index import ServiceContext\n'), ((945, 990), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', (['*args'], {}), '(*args, **kwargs)\n', (973, 990), False, 'import openai\n'), ((1490, 1523), 're.sub', 're.sub', (['"""\\\\[[0-9;m]+"""', '""""""', 'output'], {}), "('\\\\[[0-9;m]+', '', output)\n", (1496, 1523), False, 'import re\n'), ((1538, 1566), 're.sub', 're.sub', (['"""\\\\\x1b"""', '""""""', 'output'], {}), "('\\\\\\x1b', '', output)\n", (1544, 1566), False, 'import re\n'), ((7810, 7859), 'streamlit.header', 'st.header', (['"""Document Headings and Descriptions -"""'], {}), "('Document Headings and Descriptions -')\n", (7819, 7859), True, 'import streamlit as st\n'), ((7980, 8053), 'llama_index.query_engine.MultiStepQueryEngine.from_defaults', 'MultiStepQueryEngine.from_defaults', ([], {'query_engine_tools': 'query_engine_tools'}), '(query_engine_tools=query_engine_tools)\n', (8014, 8053), False, 'from llama_index.query_engine import MultiStepQueryEngine\n'), ((9793, 9814), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (9799, 9814), False, 'from langchain import LLMChain, OpenAI\n'), ((9833, 9865), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (9841, 9865), False, 'from langchain import LLMChain, OpenAI\n'), ((9930, 10056), 'langchain.agents.LLMSingleActionAgent', 'LLMSingleActionAgent', ([], {'llm_chain': 'llm_chain', 'output_parser': 'output_parser', 'stop': "['\\nObservation:']", 'allowed_tools': 'tool_names'}), "(llm_chain=llm_chain, output_parser=output_parser, stop\n =['\\nObservation:'], allowed_tools=tool_names)\n", (9950, 10056), False, 'from langchain.agents import AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((10119, 10193), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'tools': 'tools', 'agent': 'agent', 'verbose': '(True)'}), '(tools=tools, agent=agent, verbose=True)\n', (10153, 10193), False, 'from langchain.agents import AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((12341, 12394), 'streamlit.text_input', 'st.text_input', (['"""Enter your Query."""'], {'key': '"""query_input"""'}), "('Enter your Query.', key='query_input')\n", (12354, 12394), True, 'import streamlit as st\n'), ((1194, 1218), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1216, 1218), False, 'import asyncio\n'), ((7438, 7481), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'x'}), '(persist_dir=x)\n', (7466, 7481), False, 'from llama_index import StorageContext, LLMPredictor\n'), ((7523, 7563), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (7546, 7563), False, 'from llama_index import TreeIndex, load_index_from_storage\n'), ((7892, 7929), 'streamlit.subheader', 'st.subheader', (["(f'{i + 1}) ' + names[i])"], {}), "(f'{i + 1}) ' + names[i])\n", (7904, 7929), True, 'import streamlit as st\n'), ((7938, 7963), 'streamlit.write', 'st.write', (['descriptions[i]'], {}), '(descriptions[i])\n', (7946, 7963), True, 'import streamlit as st\n'), ((8070, 8395), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Llama-Index"""', 'func': 's_engine.query', 'description': 'f"""Useful for when you want to answer questions. The input to this tool should be a complete English sentence. Works best if you redirect the entire query back into this. This is an AI Assistant, ask complete questions, articulate well."""', 'return_direct': '(True)'}), "(name='Llama-Index', func=s_engine.query, description=\n f'Useful for when you want to answer questions. The input to this tool should be a complete English sentence. Works best if you redirect the entire query back into this. This is an AI Assistant, ask complete questions, articulate well.'\n , return_direct=True)\n", (8074, 8395), False, 'from langchain.agents import Tool\n'), ((10434, 10455), 'streamlit.write', 'st.write', (['ox.response'], {}), '(ox.response)\n', (10442, 10455), True, 'import streamlit as st\n'), ((11553, 11592), 're.search', 're.search', (['regex', 'llm_output', 're.DOTALL'], {}), '(regex, llm_output, re.DOTALL)\n', (11562, 11592), False, 'import re\n'), ((12065, 12133), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': '(-1)'}), "(temperature=0, model_name='gpt-3.5-turbo', max_tokens=-1)\n", (12075, 12133), False, 'from langchain.chat_models import ChatOpenAI\n'), ((12466, 12523), 'streamlit.text_input', 'st.text_input', (['"""Enter your Query."""'], {'key': '"""new_query_input"""'}), "('Enter your Query.', key='new_query_input')\n", (12479, 12523), True, 'import streamlit as st\n'), ((10291, 10302), 'redirect.stdout', 'rd.stdout', ([], {}), '()\n', (10300, 10302), True, 'import redirect as rd\n'), ((12611, 12669), 'streamlit.text_input', 'st.text_input', (['"""Enter your Query."""'], {'key': '"""new_query_input1"""'}), "('Enter your Query.', key='new_query_input1')\n", (12624, 12669), True, 'import streamlit as st\n'), ((1335, 1359), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (1357, 1359), False, 'import asyncio\n'), ((1372, 1400), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (1394, 1400), False, 'import asyncio\n'), ((1420, 1444), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1442, 1444), False, 'import asyncio\n'), ((12773, 12831), 'streamlit.text_input', 'st.text_input', (['"""Enter your Query."""'], {'key': '"""new_query_input2"""'}), "('Enter your Query.', key='new_query_input2')\n", (12786, 12831), True, 'import streamlit as st\n'), ((7734, 7790), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': 'names[n]', 'description': 'descriptions[n]'}), '(name=names[n], description=descriptions[n])\n', (7746, 7790), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n'), ((12951, 13009), 'streamlit.text_input', 'st.text_input', (['"""Enter your Query."""'], {'key': '"""new_query_input3"""'}), "('Enter your Query.', key='new_query_input3')\n", (12964, 13009), True, 'import streamlit as st\n'), ((13145, 13203), 'streamlit.text_input', 'st.text_input', (['"""Enter your Query."""'], {'key': '"""new_query_input4"""'}), "('Enter your Query.', key='new_query_input4')\n", (13158, 13203), True, 'import streamlit as st\n'), ((13355, 13413), 'streamlit.text_input', 'st.text_input', (['"""Enter your Query."""'], {'key': '"""new_query_input5"""'}), "('Enter your Query.', key='new_query_input5')\n", (13368, 13413), True, 'import streamlit as st\n')] |
# Required Environment Variables: OPENAI_API_KEY
# Required TavilyAI API KEY for web searches - https://tavily.com/
from llama_index.core import SimpleDirectoryReader
from llama_index.packs.corrective_rag import CorrectiveRAGPack
# load documents
documents = SimpleDirectoryReader("./data").load_data()
# uses the LLM to extract propositions from every document/node!
corrective_rag = CorrectiveRAGPack(documents, tavily_ai_apikey="<tavily_ai_apikey>")
# run the pack
response = corrective_rag.run("<Query>")
print(response)
| [
"llama_index.packs.corrective_rag.CorrectiveRAGPack",
"llama_index.core.SimpleDirectoryReader"
] | [((387, 454), 'llama_index.packs.corrective_rag.CorrectiveRAGPack', 'CorrectiveRAGPack', (['documents'], {'tavily_ai_apikey': '"""<tavily_ai_apikey>"""'}), "(documents, tavily_ai_apikey='<tavily_ai_apikey>')\n", (404, 454), False, 'from llama_index.packs.corrective_rag import CorrectiveRAGPack\n'), ((260, 291), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data"""'], {}), "('./data')\n", (281, 291), False, 'from llama_index.core import SimpleDirectoryReader\n')] |
# LLama Index starter example from: https://gpt-index.readthedocs.io/en/latest/getting_started/starter_example.html
# In order to run this, download into data/ Paul Graham's Essay 'What I Worked On' from
# https://github.com/jerryjliu/llama_index/blob/main/examples/paul_graham_essay/data/paul_graham_essay.txt
# curl https://raw.githubusercontent.com/jerryjliu/llama_index/main/examples/paul_graham_essay/data/paul_graham_essay.txt > data/paul_graham_essay.txt
import json
from dotenv import load_dotenv
import os
import pprint
from llama_index import VectorStoreIndex, SimpleDirectoryReader
from llama_index import StorageContext, load_index_from_storage
from llama_index.node_parser import SimpleNodeParser
from llama_index.schema import TextNode, NodeRelationship, RelatedNodeInfo
load_dotenv()
pp = pprint.PrettyPrinter(indent=4).pprint
def make_index():
print('Loading documents...')
documents = SimpleDirectoryReader('data').load_data()
index = VectorStoreIndex.from_documents(documents)
index.storage_context.persist()
def load_index():
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir="./storage")
# load index
index = load_index_from_storage(storage_context)
return index
def read_doc():
with open('data/worked_on.txt') as f:
doc = f.read()
return doc
def get_lines():
doc = read_doc()
lines = []
for line in doc.split('\n'):
line = line.strip().strip().strip().strip()
if len(line) == 0:
continue
lines.append(line)
print('lines', json.dumps(lines, indent=2))
return lines
# make an index from lines -> nodes -> index
def index_from_lines(lines):
count = 0
nodes = []
for idx, line in enumerate(lines):
node = TextNode(text=line, id_=idx)
print('----\n', line)
nodes.append(node)
for idx, node in enumerate(nodes):
if idx < len(nodes) - 1:
next = nodes[idx+1]
node.relationships[NodeRelationship.NEXT] = RelatedNodeInfo(node_id=next.node_id)
if idx > 0:
prev = nodes[idx-1]
node.relationships[NodeRelationship.PREVIOUS] = RelatedNodeInfo(node_id=prev.node_id)
index = VectorStoreIndex(nodes)
return index
def get_nodes():
parser = SimpleNodeParser()
documents = SimpleDirectoryReader('data').load_data()
nodes = parser.get_nodes_from_documents(documents)
count = 0
for node in nodes:
print('\n--- node', count)
print(vars(node))
pp(node)
# print(json.dumps(vars(node), indent=2))
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.schema.TextNode",
"llama_index.StorageContext.from_defaults",
"llama_index.schema.RelatedNodeInfo",
"llama_index.VectorStoreIndex",
"llama_index.SimpleDirectoryReader",
"llama_index.load_index_from_storage"
] | [((789, 802), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (800, 802), False, 'from dotenv import load_dotenv\n'), ((809, 839), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (829, 839), False, 'import pprint\n'), ((970, 1012), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1001, 1012), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((1120, 1173), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (1148, 1173), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((1203, 1243), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1226, 1243), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((2243, 2266), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {}), '(nodes)\n', (2259, 2266), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((2316, 2334), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {}), '()\n', (2332, 2334), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((1593, 1620), 'json.dumps', 'json.dumps', (['lines'], {'indent': '(2)'}), '(lines, indent=2)\n', (1603, 1620), False, 'import json\n'), ((1797, 1825), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'line', 'id_': 'idx'}), '(text=line, id_=idx)\n', (1805, 1825), False, 'from llama_index.schema import TextNode, NodeRelationship, RelatedNodeInfo\n'), ((916, 945), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (937, 945), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((2043, 2080), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'next.node_id'}), '(node_id=next.node_id)\n', (2058, 2080), False, 'from llama_index.schema import TextNode, NodeRelationship, RelatedNodeInfo\n'), ((2193, 2230), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'prev.node_id'}), '(node_id=prev.node_id)\n', (2208, 2230), False, 'from llama_index.schema import TextNode, NodeRelationship, RelatedNodeInfo\n'), ((2351, 2380), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (2372, 2380), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n')] |
import sounddevice as sd
import wavio
import whisper
import openai
from llama_index.llms import LlamaCPP
from llama_index.llms.base import ChatMessage
def record_audio(output_filename, duration, sample_rate):
print("Recording...")
audio_data = sd.rec(int(duration * sample_rate),
samplerate=sample_rate, channels=1)
sd.wait() # Wait until recording is finished
print("Recording finished.")
# Save the recorded audio to a WAV file
wavio.write(output_filename, audio_data, sample_rate, sampwidth=2)
def transcribe_audio(audio_file):
model = whisper.load_model('base')
text = model.transcribe(audio_file)
return text['text']
def check_grammar_and_format(text):
path = r'C:\Users\vikra\llama.cpp\llama-2-13b-chat.ggmlv3.q4_0.bin'
llm_gpt = LlamaCPP(model_path=path)
message = ChatMessage(role='user', content=f'check grammar and the correct format for the following: {text}')
return llm_gpt.chat([message])
def main():
print("Speech-to-Text and Grammar Checking")
recording_duration = 5
output_file = "recorded_audio.wav"
sample_rate = 44100
record_audio(output_file, recording_duration, sample_rate)
print("Audio saved as:", output_file)
if not sd.query_devices(None, 'input')['default_samplerate'] == sample_rate:
print("Warning: The sample rate of the input device is not set to", sample_rate)
transcribed_text = transcribe_audio(output_file)
print("Transcribed Text:", transcribed_text)
grammar_check_result = check_grammar_and_format(transcribed_text)
print("Grammar Check Result:", grammar_check_result)
if __name__ == "__main__":
main()
| [
"llama_index.llms.base.ChatMessage",
"llama_index.llms.LlamaCPP"
] | [((366, 375), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (373, 375), True, 'import sounddevice as sd\n'), ((498, 564), 'wavio.write', 'wavio.write', (['output_filename', 'audio_data', 'sample_rate'], {'sampwidth': '(2)'}), '(output_filename, audio_data, sample_rate, sampwidth=2)\n', (509, 564), False, 'import wavio\n'), ((617, 643), 'whisper.load_model', 'whisper.load_model', (['"""base"""'], {}), "('base')\n", (635, 643), False, 'import whisper\n'), ((839, 864), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_path': 'path'}), '(model_path=path)\n', (847, 864), False, 'from llama_index.llms import LlamaCPP\n'), ((880, 984), 'llama_index.llms.base.ChatMessage', 'ChatMessage', ([], {'role': '"""user"""', 'content': 'f"""check grammar and the correct format for the following: {text}"""'}), "(role='user', content=\n f'check grammar and the correct format for the following: {text}')\n", (891, 984), False, 'from llama_index.llms.base import ChatMessage\n'), ((1301, 1332), 'sounddevice.query_devices', 'sd.query_devices', (['None', '"""input"""'], {}), "(None, 'input')\n", (1317, 1332), True, 'import sounddevice as sd\n')] |
import argparse
import os
from llama_index import StorageContext, load_index_from_storage
from dotenv import load_dotenv
from llama_index import VectorStoreIndex, SimpleDirectoryReader
def query_data(query: str):
"""Query to a vector database
## argument
Return: return_description
"""
storage_context = StorageContext.from_defaults(persist_dir="./storage")
# load index
index = load_index_from_storage(storage_context)
query_engine = index.as_query_engine()
user_query = query_engine.query(query)
user_query = user_query.response
print(user_query)
return user_query
# x = 0
def main():
parser = argparse.ArgumentParser(description='Query a vector database.')
parser.add_argument('query', type=str, help='Query to be executed')
args = parser.parse_args()
query_data(args.query)
if __name__ == "__main__":
main()
| [
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage"
] | [((335, 388), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (363, 388), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((418, 458), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (441, 458), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((665, 728), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Query a vector database."""'}), "(description='Query a vector database.')\n", (688, 728), False, 'import argparse\n')] |
from llama_index import SimpleDirectoryReader
from llama_index import ServiceContext
from langchain.chat_models import ChatOpenAI
from llama_index import VectorStoreIndex
from utils import build_sentence_window_index
from utils import build_automerging_index
import sys
import os
import logging
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
# get config values
src_data_dir = config['index']['src_data_dir']
basic_idx_dir = config['index']['basic_idx_dir']
sent_win_idx_dir = config['index']['sent_win_idx_dir']
auto_mrg_idx_dir = config['index']['auto_mrg_idx_dir']
modelname = config['index']['modelname']
embed_modelname = config['index']['embedmodel']
def check_and_create_directory(directory_path):
if not os.path.exists(directory_path):
os.makedirs(directory_path)
print(f"Directory '{directory_path}' created successfully.")
else:
print(f"Directory '{directory_path}' already exists.")
def construct_basic_index(src_directory_path,index_directory):
check_and_create_directory(index_directory)
llm =ChatOpenAI(temperature=0.1, model_name=modelname)
service_context = ServiceContext.from_defaults(
llm=llm, embed_model=embed_modelname
)
documents = SimpleDirectoryReader(src_directory_path).load_data()
index = VectorStoreIndex.from_documents(documents,
service_context=service_context)
index.storage_context.persist(persist_dir=index_directory)
return index
def construct_sentencewindow_index(src_directory_path,index_directory):
llm =ChatOpenAI(temperature=0.1, model_name=modelname)
documents = SimpleDirectoryReader(src_directory_path).load_data()
index = build_sentence_window_index(
documents,
llm,
embed_model=embed_modelname,
save_dir=index_directory
)
return index
def construct_automerge_index(src_directory_path,index_directory):
llm =ChatOpenAI(temperature=0.1, model_name=modelname)
documents = SimpleDirectoryReader(src_directory_path).load_data()
index = build_automerging_index(
documents,
llm,
embed_model=embed_modelname,
save_dir=index_directory
)
return index
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
#Create basic index
index = construct_basic_index(src_data_dir,basic_idx_dir)
#create sentencewindow index
sentindex = construct_sentencewindow_index(src_data_dir,sent_win_idx_dir)
#create automerge index
autoindex = construct_automerge_index(src_data_dir,auto_mrg_idx_dir) | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader"
] | [((328, 355), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (353, 355), False, 'import configparser\n'), ((2287, 2346), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (2306, 2346), False, 'import logging\n'), ((1121, 1170), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)', 'model_name': 'modelname'}), '(temperature=0.1, model_name=modelname)\n', (1131, 1170), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1193, 1259), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_modelname'}), '(llm=llm, embed_model=embed_modelname)\n', (1221, 1259), False, 'from llama_index import ServiceContext\n'), ((1360, 1435), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1391, 1435), False, 'from llama_index import VectorStoreIndex\n'), ((1663, 1712), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)', 'model_name': 'modelname'}), '(temperature=0.1, model_name=modelname)\n', (1673, 1712), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1795, 1897), 'utils.build_sentence_window_index', 'build_sentence_window_index', (['documents', 'llm'], {'embed_model': 'embed_modelname', 'save_dir': 'index_directory'}), '(documents, llm, embed_model=embed_modelname,\n save_dir=index_directory)\n', (1822, 1897), False, 'from utils import build_sentence_window_index\n'), ((2014, 2063), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)', 'model_name': 'modelname'}), '(temperature=0.1, model_name=modelname)\n', (2024, 2063), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2146, 2244), 'utils.build_automerging_index', 'build_automerging_index', (['documents', 'llm'], {'embed_model': 'embed_modelname', 'save_dir': 'index_directory'}), '(documents, llm, embed_model=embed_modelname,\n save_dir=index_directory)\n', (2169, 2244), False, 'from utils import build_automerging_index\n'), ((2378, 2418), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (2399, 2418), False, 'import logging\n'), ((769, 799), 'os.path.exists', 'os.path.exists', (['directory_path'], {}), '(directory_path)\n', (783, 799), False, 'import os\n'), ((809, 836), 'os.makedirs', 'os.makedirs', (['directory_path'], {}), '(directory_path)\n', (820, 836), False, 'import os\n'), ((2347, 2366), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2364, 2366), False, 'import logging\n'), ((1294, 1335), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['src_directory_path'], {}), '(src_directory_path)\n', (1315, 1335), False, 'from llama_index import SimpleDirectoryReader\n'), ((1729, 1770), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['src_directory_path'], {}), '(src_directory_path)\n', (1750, 1770), False, 'from llama_index import SimpleDirectoryReader\n'), ((2080, 2121), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['src_directory_path'], {}), '(src_directory_path)\n', (2101, 2121), False, 'from llama_index import SimpleDirectoryReader\n')] |