heaversm committed
Commit aee00f4 · 1 Parent(s): 6d8f511

remove unused files, add faiss-cpu

code_sim_index/index.faiss DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:acdf704e8ab7d216c5ab0582681c7dc13e2ab349faeb45b43d817b2b95effe3b
-size 3462189
code_sim_index/index.pkl DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:88ab0cf9c92a95f849f9b7a2c896949219a2b776dc26c4c1f87668a2d6d6e0c9
-size 1205614
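The two pointer files above are Git LFS stubs for a FAISS vector store that had been saved to disk. The `index.faiss` / `index.pkl` pair is exactly what LangChain's `save_local` writes and what `load_local` reads back (the deleted `search-pickle.py` below does the latter). A minimal sketch of the round trip, assuming a store `db` built as in the deleted `github.py`:

```python
# Hedged sketch, not part of this commit: persisting and reloading the store.
# Assumes `db` is a langchain_community FAISS store and `embeddings` is the
# HuggingFaceEmbeddings instance it was built with.
from langchain_community.vectorstores import FAISS

db.save_local("code_sim_index")  # writes index.faiss and index.pkl

db2 = FAISS.load_local(
    "code_sim_index",
    embeddings,
    # index.pkl is a pickle, so deserialization must be opted into explicitly
    allow_dangerous_deserialization=True,
)
```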
codesearchdb.pickle DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:af8c505cc7289eda3880bb53050c55d0b6c901e34b61a936760b17b9eb5ce934
-size 504758467
github.py DELETED
@@ -1,67 +0,0 @@
-import os
-from dotenv import load_dotenv
-from langchain.document_loaders import GithubFileLoader
-# from langchain.embeddings import HuggingFaceEmbeddings
-from langchain_huggingface import HuggingFaceEmbeddings
-from langchain_community.vectorstores import FAISS
-from langchain_text_splitters import CharacterTextSplitter
-
-load_dotenv()
-
-#get the GITHUB_ACCESS_TOKEN from the .env file
-GITHUB_ACCESS_TOKEN = os.getenv("GITHUB_ACCESS_TOKEN")
-USER = "heaversm"
-REPO = "gdrive-docker"
-GITHUB_BASE_URL = "https://github.com/"
-
-
-def get_similar_files(query, db, embeddings):
-    # embedding_vector = embeddings.embed_query(query)
-    # docs_and_scores = db.similarity_search_by_vector(embedding_vector, k = 10)
-    docs_and_scores = db.similarity_search_with_score(query)
-    return docs_and_scores
-
-def get_hugging_face_model():
-    model_name = "mchochlov/codebert-base-cd-ft"
-    hf = HuggingFaceEmbeddings(model_name=model_name)
-    return hf
-
-loader = GithubFileLoader(
-    #repo is USER/REPO
-    repo=f"{USER}/{REPO}",
-    access_token=GITHUB_ACCESS_TOKEN,
-    github_api_url="https://api.github.com",
-    file_filter=lambda file_path: file_path.endswith(
-        (".py", ".ts")
-    ), # load all python and typescript files
-)
-documents = loader.load()
-text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
-docs = text_splitter.split_documents(documents)
-embedding_vector = get_hugging_face_model()
-db = FAISS.from_documents(docs, embedding_vector)
-model_name = "mchochlov/codebert-base-cd-ft"
-
-query = """
-def create_app():
-    app = connexion.FlaskApp(__name__, specification_dir="../.openapi")
-    app.add_api(
-        API_VERSION, resolver=connexion.resolver.RelativeResolver("provider.app")
-    )
-"""
-results_with_scores = get_similar_files(query, db, embedding_vector)
-print ("retrieved!!!")
-print(f"Number of results: {len(results_with_scores)}")
-
-# score is a distance score, the lower the better
-for doc, score in results_with_scores:
-    print(f"Metadata: {doc.metadata}, Score: {score}")
-
-top_file_path = results_with_scores[0][0].metadata['path']
-top_file_content = results_with_scores[0][0].page_content
-top_file_score = results_with_scores[0][1]
-top_file_link = f"{GITHUB_BASE_URL}{USER}/{REPO}/blob/main/{top_file_path}"
-
-print(f"Top file link: {top_file_link}")
-
-
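For reference, `similarity_search_with_score` (the call at the heart of the deleted script above) returns a list of `(Document, score)` tuples, four by default, where the score is a distance and lower means more similar. A minimal sketch of consuming that shape, assuming a FAISS store `db` built as in the script:

```python
# Hedged sketch: the return shape of similarity_search_with_score.
# Assumes `db` is the FAISS store built in the deleted github.py.
results_with_scores = db.similarity_search_with_score("def create_app(): ...", k=4)

# Each result is a (Document, distance) pair; smaller distance = closer match.
best_doc, best_score = min(results_with_scores, key=lambda pair: pair[1])
print(best_doc.metadata.get("path"), best_score)
```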
github_st.py DELETED
@@ -1,88 +0,0 @@
-import streamlit as st
-import os
-from dotenv import load_dotenv
-from langchain.document_loaders import GithubFileLoader
-# from langchain.embeddings import HuggingFaceEmbeddings
-from langchain_huggingface import HuggingFaceEmbeddings
-from langchain_community.vectorstores import FAISS
-from langchain_text_splitters import CharacterTextSplitter
-
-load_dotenv()
-
-#get the GITHUB_ACCESS_TOKEN from the .env file
-GITHUB_ACCESS_TOKEN = os.getenv("GITHUB_ACCESS_TOKEN")
-GITHUB_BASE_URL = "https://github.com/"
-
-
-@st.cache_resource
-def get_hugging_face_model():
-    model_name = "mchochlov/codebert-base-cd-ft"
-    hf = HuggingFaceEmbeddings(model_name=model_name)
-    return hf
-
-def get_similar_files(query, db, embeddings):
-    # embedding_vector = embeddings.embed_query(query)
-    # docs_and_scores = db.similarity_search_by_vector(embedding_vector, k = 10)
-    docs_and_scores = db.similarity_search_with_score(query)
-    return docs_and_scores
-
-
-
-
-st.title("Find Similar Code")
-
-#streamlit text input for USER
-USER = st.text_input("Enter the Github User", value = "heaversm")
-#streamlit text input for REPO
-REPO = st.text_input("Enter the Github Repository", value = "gdrive-docker")
-
-#streamlit file type selector
-FILE_TYPES_TO_LOAD = st.multiselect("Select File Types", [".py", ".ts",".js",".css",".html"], default = [".py"])
-
-
-text_input = st.text_area("Enter a Code Example", value =
-"""
-def create_app():
-    app = connexion.FlaskApp(__name__, specification_dir="../.openapi")
-    app.add_api(
-        API_VERSION, resolver=connexion.resolver.RelativeResolver("provider.app")
-    )
-""", height = 330
-)
-
-button = st.button("Find Similar Code")
-
-
-if button:
-    loader = GithubFileLoader(
-        #repo is USER/REPO
-        repo=f"{USER}/{REPO}",
-        access_token=GITHUB_ACCESS_TOKEN,
-        github_api_url="https://api.github.com",
-        file_filter=lambda file_path: file_path.endswith(
-            tuple(FILE_TYPES_TO_LOAD)
-        )
-    )
-    documents = loader.load()
-    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
-    docs = text_splitter.split_documents(documents)
-    embedding_vector = get_hugging_face_model()
-    db = FAISS.from_documents(docs, embedding_vector)
-    query = text_input
-    results_with_scores = get_similar_files(query, db, embedding_vector)
-    for doc, score in results_with_scores:
-        print(f"Metadata: {doc.metadata}, Score: {score}")
-
-    top_file_path = results_with_scores[0][0].metadata['path']
-    top_file_content = results_with_scores[0][0].page_content
-    top_file_score = results_with_scores[0][1]
-    top_file_link = f"{GITHUB_BASE_URL}{USER}/{REPO}/blob/main/{top_file_path}"
-    # write a clickable link in streamlit
-    st.markdown(f"[Top file link]({top_file_link})")
-
-
-else:
-    st.info("Please Submit a Code Sample")
-
-
-
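The deleted Streamlit app wraps the same pipeline in a UI, and its `@st.cache_resource` decorator is what keeps that workable: Streamlit re-executes the whole script on every widget interaction, so without caching the embedding model would be reloaded on each click. A minimal sketch of the pattern in isolation:

```python
# Hedged sketch of the @st.cache_resource pattern used in the deleted app.
import streamlit as st
from langchain_huggingface import HuggingFaceEmbeddings

@st.cache_resource
def get_hugging_face_model():
    # Runs once per process; later script reruns return the cached instance.
    return HuggingFaceEmbeddings(model_name="mchochlov/codebert-base-cd-ft")

embeddings = get_hugging_face_model()
st.write("model ready")
```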
requirements.txt CHANGED
@@ -5,4 +5,5 @@ langchain-community
 langchain_huggingface
 langchain_text_splitters
 sentence-transformers
+faiss-cpu
 altair==4.0
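The `faiss-cpu` line added here supplies the `faiss` package that `langchain_community.vectorstores.FAISS` imports at runtime; without it, building a store raises an ImportError even though the LangChain packages themselves install fine. A minimal sketch to verify the dependency, reusing the model pinned in the deleted scripts:

```python
# Hedged sketch: confirm faiss-cpu backs the LangChain FAISS store.
import faiss  # provided by the faiss-cpu package

from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

embeddings = HuggingFaceEmbeddings(model_name="mchochlov/codebert-base-cd-ft")
db = FAISS.from_texts(["def add(a, b): return a + b"], embeddings)
print(type(db.index))  # a native faiss index, e.g. faiss.IndexFlatL2
```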
search-pickle.py DELETED
@@ -1,99 +0,0 @@
-import streamlit as st
-from bs4 import BeautifulSoup
-from langchain.embeddings import HuggingFaceEmbeddings
-import pickle
-import torch
-import io
-from langchain.vectorstores import FAISS
-import json
-
-class CPU_Unpickler(pickle.Unpickler):
-    def find_class(self, module, name):
-        if module == 'torch.storage' and name == '_load_from_bytes':
-            return lambda b: torch.load(io.BytesIO(b), map_location='cpu')
-        else: return super().find_class(module, name)
-
-
-@st.cache_resource
-def get_hugging_face_model():
-    model_name = "mchochlov/codebert-base-cd-ft"
-    hf = HuggingFaceEmbeddings(model_name=model_name)
-    return hf
-
-
-@st.cache_resource
-def get_db():
-    with open("codesearchdb.pickle", "rb") as f:
-        db = CPU_Unpickler(f).load()
-    print("Loaded db")
-    # save_as_json(db, "codesearchdb.json") # Save as JSON
-    return db
-
-def save_as_json(data, filename):
-    # Convert the data to a JSON serializable format
-    serializable_data = data_to_serializable(data)
-    with open(filename, "w") as json_file:
-        json.dump(serializable_data, json_file)
-
-def data_to_serializable(data):
-    if isinstance(data, dict):
-        return {k: data_to_serializable(v) for k, v in data.items() if not callable(v) and not isinstance(v, type)}
-    elif isinstance(data, list):
-        return [data_to_serializable(item) for item in data]
-    elif isinstance(data, (str, int, float, bool)) or data is None:
-        return data
-    elif hasattr(data, '__dict__'):
-        return data_to_serializable(data.__dict__)
-    elif hasattr(data, '__slots__'):
-        return {slot: data_to_serializable(getattr(data, slot)) for slot in data.__slots__}
-    else:
-        return str(data) # Convert any other types to string
-
-def get_similar_links(query, db, embeddings):
-    embedding_vector = embeddings.embed_query(query)
-    docs_and_scores = db.similarity_search_by_vector(embedding_vector, k = 10)
-    hrefs = []
-    for docs in docs_and_scores:
-        html_doc = docs.page_content
-        soup = BeautifulSoup(html_doc, 'html.parser')
-        href = [a['href'] for a in soup.find_all('a', href=True)]
-        hrefs.append(href)
-    links = []
-    for href_list in hrefs:
-        for link in href_list:
-            links.append(link)
-    return links
-
-
-embedding_vector = get_hugging_face_model()
-db = FAISS.load_local("code_sim_index", embedding_vector, allow_dangerous_deserialization=True)
-save_as_json(db, "code_sim_index.json") # Save as JSON
-
-st.title("Find Similar Code")
-text_input = st.text_area("Enter a Code Example", value =
-"""
-class Solution:
-    def subsets(self, nums: List[int]) -> List[List[int]]:
-        outputs = []
-        def backtrack(k, index, subSet):
-            if index == k:
-                outputs.append(subSet[:])
-                return
-            for i in range(index, len(nums)):
-                backtrack(k, i + 1, subSet + [nums[i]])
-        for j in range(len(nums) + 1):
-            backtrack(j, 0, [])
-        return outputs
-""", height = 330
-)
-button = st.button("Find Similar Questions")
-if button:
-    query = text_input
-    answer = get_similar_links(query, db, embedding_vector)
-    for link in set(answer):
-        st.write(link)
-
-else:
-    st.info("Please Input Valid Text")
-
-# get_db()
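The `CPU_Unpickler` in the deleted file above addresses a real portability problem: a pickle written on a GPU machine can contain torch storages that try to restore onto CUDA, which fails on CPU-only hosts. Intercepting `torch.storage._load_from_bytes` and remapping to CPU lets the object load anywhere. A minimal sketch of the pattern on its own:

```python
# Hedged sketch: load a torch-bearing pickle on a CPU-only machine.
import io
import pickle

import torch

class CPU_Unpickler(pickle.Unpickler):
    def find_class(self, module, name):
        if module == "torch.storage" and name == "_load_from_bytes":
            # Remap any pickled torch storage onto the CPU.
            return lambda b: torch.load(io.BytesIO(b), map_location="cpu")
        return super().find_class(module, name)

# Usage against the pickle deleted by this commit, for illustration only:
# with open("codesearchdb.pickle", "rb") as f:
#     db = CPU_Unpickler(f).load()
```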
unpickle.py DELETED
@@ -1,19 +0,0 @@
-import pickle
-
-# Define the path to the pickle file
-pickle_file_path = 'codesearchdb.pickle'
-
-# Load the pickle file
-with open(pickle_file_path, 'rb') as file:
-    data = pickle.load(file)
-
-
-
-# Save the contents to a new file (for example, a JSON file)
-import json
-
-json_file_path = 'codesearchdb.json'
-with open(json_file_path, 'w') as json_file:
-    json.dump(data, json_file, indent=4)
-
-print(f"Contents have been saved to {json_file_path}")