import pytest
import os
import openai
import argparse
import lancedb
import re
import pickle
import requests
import zipfile
from pathlib import Path
from main import get_document_title
from langchain.document_loaders import BSHTMLLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import LanceDB
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
# TESTING ===============================================================
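# The fixture below stubs OpenAIEmbeddings.embed_query so the test never
# calls the real OpenAI API.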
@pytest.fixture
def mock_embed(monkeypatch):
    def mock_embed_query(self, query):
        # Return a fixed dummy embedding instead of a real OpenAI embedding.
        return [0.5, 0.5]

    monkeypatch.setattr(OpenAIEmbeddings, "embed_query", mock_embed_query)
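
# test_main exercises the full ingestion pipeline: download the pandas
# documentation, parse the HTML pages, split them into chunks, and build a
# LanceDB table from a seed embedding.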
def test_main(mock_embed):
    os.makedirs("./tmp", exist_ok=True)  # tolerate an existing ./tmp on re-runs
    args = argparse.Namespace(query="test", openai_key="test")
    os.environ["OPENAI_API_KEY"] = "test"
    docs_path = Path("docs.pkl")
    docs = []
    pandas_docs = requests.get(
        "https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip"
    )
    with open("./tmp/pandas.documentation.zip", "wb") as f:
        f.write(pandas_docs.content)
    file = zipfile.ZipFile("./tmp/pandas.documentation.zip")
    file.extractall(path="./tmp/pandas_docs")
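    # Parse each HTML page once and cache the parsed documents in docs.pkl so
    # repeat runs can skip the slow BeautifulSoup pass.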
    if not docs_path.exists():
        for p in Path("./tmp/pandas_docs/pandas.documentation").rglob("*.html"):
            print(p)
            if p.is_dir():
                continue
            loader = BSHTMLLoader(p, open_encoding="utf8")
            raw_document = loader.load()
            m = {}
            m["title"] = get_document_title(raw_document[0])
            m["version"] = "2.0rc0"
            raw_document[0].metadata = raw_document[0].metadata | m
            raw_document[0].metadata["source"] = str(raw_document[0].metadata["source"])
            docs = docs + raw_document
        with docs_path.open("wb") as fh:
            pickle.dump(docs, fh)
    else:
        with docs_path.open("rb") as fh:
            docs = pickle.load(fh)
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
    )
    documents = text_splitter.split_documents(docs)
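    # Connect to a local LanceDB database; the single seed row below defines
    # the table schema (vector, text, id).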
    db = lancedb.connect("./tmp/lancedb")
    table = db.create_table(
        "pandas_docs",
        data=[
            {
                "vector": OpenAIEmbeddings().embed_query("Hello World"),
                "text": "Hello World",
                "id": "1",
            }
        ],
        mode="overwrite",
    )
    # docsearch = LanceDB.from_documents(documents, OpenAIEmbeddings(), connection=table)
    # qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever())
    # result = qa.run(args.query)
    # print(result)
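    # The retrieval/QA step above is left disabled: enabling it would embed every
    # chunk and query an OpenAI LLM, which would require a valid OpenAI key and
    # network access rather than the stubbed embeddings used in this test.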