from concurrent.futures import ThreadPoolExecutor
import os
import random
import subprocess
import timeit
import datasets
"""Example
git init
git remote add origin https://github.com/huggingface/evaluate.git
git fetch --depth 2 origin 9b056cdd5eb95459ae80142014865263e7dd75b8
# Get file after change
git checkout FETCH_HEAD -- README.md
# Get file before change
git checkout FETCH_HEAD^ -- README.md
"""
# In the multiprocessing case, the below leads to each process creating the same directory
# random = random.Random(42) # make it reproducible
NUM_THREADS = 64
NUM_PROC = 64
# DEBUG_SIZE = 1024
CWD = os.getcwd()
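# The helpers below replay the git sequence from the example docstring above for every commit:
# shallow-fetch the commit, then check out the file after and before the change.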
# Shell utils
def run_in_shell(cmd: str, cwd=None, timeout=60):
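    """Run a shell command, capturing stdout/stderr; raises subprocess.TimeoutExpired when `timeout` is exceeded."""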
    return subprocess.run([cmd], capture_output=True, shell=True, cwd=cwd, timeout=timeout)
def get_file_contents(commit, old_file, new_file, repo, cwd=None):
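    """Shallow-fetch `commit` from `repo` into the git repo at `cwd` and return
    (new_contents, old_contents, returncode, stderr); contents default to "" when a
    checkout fails (e.g. authentication required or the file did not exist before the commit)."""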
    completed = run_in_shell("git init", cwd=cwd)
    completed = run_in_shell("git remote add origin " + repo, cwd=cwd)
    completed = run_in_shell("git fetch --depth 2 origin " + commit, cwd=cwd)
    # If it requires authentication
    if completed.returncode != 0:
        #print("ERRORC1", completed)
        return ("", "", completed.returncode, completed.stderr.decode(errors='ignore'))
    # Optionally do git diff at the same time (Saving code needs to be added)
    # git_diff = run_in_shell(f"git diff {commit}^ {commit}", cwd=cwd).stdout.decode(errors='ignore')
    completed = run_in_shell("git checkout FETCH_HEAD -- " + new_file, cwd=cwd)
    new_contents = run_in_shell("cat " + new_file, cwd=cwd).stdout.decode(errors='ignore')
    completed = run_in_shell("git checkout FETCH_HEAD^ -- " + old_file, cwd=cwd)
    # If there's only a new file, but no old file
    if completed.returncode != 0:
        #print("ERRORC2", completed)
        return (new_contents, "", completed.returncode, completed.stderr.decode(errors='ignore'))
    old_contents = run_in_shell("cat " + old_file, cwd=cwd).stdout.decode(errors='ignore')
    return (new_contents, old_contents, completed.returncode, completed.stderr.decode(errors='ignore'))
def get_diff(ex):
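    """Fetch pre-/post-commit file contents for one dataset example.

    Tries each repository listed for the commit inside a throwaway, randomly named
    directory (removed afterwards) and returns the example with `new_contents`,
    `old_contents`, `returncode` and `stderr` filled in; empty contents if no repo worked.
    """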
    commit_id = ex["commit"]
    repos = list(set(ex["repos"].split(",")))
    old_file = ex["old_file"]
    new_file = ex["new_file"]
    # Initialize
    returncode = 0
    stderr = "unknown"
    for i, repo in enumerate(repos):
        # Dummy credentials so git fails fast instead of prompting when a repo requires authentication
        repo = "https://xxx:xxx@github.com/" + repo + ".git"
        # Create a random directory to store the repo
        random_dir = CWD + "/" + str(random.randint(0, 1000000))
        # Can take very long when running many processes
        run_in_shell("mkdir " + random_dir, timeout=300)
        try:
            new_contents, old_contents, returncode, stderr = get_file_contents(commit_id, old_file, new_file, repo, cwd=random_dir)
        except Exception as e:
            #print("ERROR", commit_id, old_file, new_file, repo, str(random_dir), e)
            # Break in case of many repos that all lead us nowhere
            if i > 10:
                break
            continue
        finally:
            run_in_shell("rm -rf " + random_dir)  # clean up again
        ex["new_contents"] = new_contents
        ex["old_contents"] = old_contents
        ex["returncode"] = returncode
        ex["stderr"] = stderr
        return ex
    # If no repo worked
    ex["new_contents"] = ""
    ex["old_contents"] = ""
    ex["returncode"] = returncode
    ex["stderr"] = stderr
    return ex
def get_diff_multi_threaded_processed(batch):
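    """Batched `datasets.map` entry point.

    `batch` is a dict of column lists, e.g. {"commit": ["c1", "c2"], "old_file": ["a.py", "b.py"], ...};
    it is converted to one dict per example, each processed by `get_diff` on its own thread
    (the work is network-bound), and the results are reassembled into column lists.
    """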
    with ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
        # Convert dict of lists to list of dicts then map to threads
        results = list(executor.map(get_diff, [dict(zip(batch, t)) for t in zip(*batch.values())]))
    # Convert list of dicts to dict of lists
    return {k: [dic[k] for dic in results] for k in results[0]}
if __name__ == "__main__":
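    # Expects the columns used below: commit, repos (comma-separated), old_file, new_file, message.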
    # git clone https://huggingface.co/datasets/bigcode/github-commits
    ds = datasets.load_dataset("./github-commits", use_auth_token=True)["train"]

    ### OPTIONAL FILTERING ###
    #"""
    java = [".java"]
    javascript = [
        ".js",
        "._js",
        ".bones",
        ".es6",
        ".jake",
        ".jsb",
        ".jscad",
        ".jsfl",
        ".jsm",
        ".jss",
        ".njs",
        ".pac",
        ".sjs",
        ".ssjs",
        ".xsjs",
        ".xsjslib"
    ]
    python = [
        ".py",
        ".bzl",
        ".gyp",
        ".lmi",
        ".pyde",
        ".pyp",
        ".pyt",
        ".pyw",
        ".tac",
        ".wsgi",
        ".xpy"
    ]
    import json
    with open("programming-languages.json", "r") as f:
        extensions = json.load(f)
    suffices = tuple([suffix for suffices in extensions.values() for suffix in suffices])
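    # programming-languages.json maps language name -> list of extensions; flatten everything
    # into one tuple because str.endswith() accepts a tuple of suffixes, not a list.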
    def filter_extension(ex):
        return ex["new_file"].endswith(suffices)
    def filter_extension_python(ex):
        # str.endswith needs a tuple, so convert the list of Python extensions
        return ex["new_file"].endswith(tuple(python))
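    # "Update <file>" is the default commit message for GitHub web edits, so it carries no signal.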
    def filter_update(ex):
        return ex["message"] != "Update " + ex["old_file"]
    filter_msg = ["initial commit", "please\n", "please", "lalala"]
    def filter_misc(ex):
        return ex["message"] not in filter_msg
    # Removes ~10M
    ds = ds.filter(filter_extension, num_proc=NUM_PROC)
    print("After Extension filter", len(ds))
    # Removes ~1M
    ds = ds.filter(filter_update, num_proc=NUM_PROC)
    print("After Update filter", len(ds))
    #ds = ds.filter(filter_extension_python, num_proc=NUM_PROC)
    #print("After Python filter", len(ds))
    ds = ds.filter(filter_misc, num_proc=NUM_PROC)
    print("After Misc filter", len(ds))
    #ds = ds.select(range(DEBUG_SIZE))
    START = 0  # Modify for each instance (0 - 7)
    samples_per_instance = 64 * 64 * 64 * 32  # 8_388_608
    select_start = START * samples_per_instance
    select_end = START * samples_per_instance + samples_per_instance
    ds = ds.select(range(select_start, select_end))
    print(f"Going from {select_start} till {select_end}")
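    # Sharding: each instance handles samples_per_instance = 8,388,608 examples, so the
    # eight START values (0-7) would cover 8 * 8,388,608 = 67,108,864 commits in total.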
    #"""
    ### END FILTERING ###

    ### ALTERNATIVELY LOAD EXISTING SPLIT ###
    """
    path = "github-commits-diff/data/diffs_50331648_58720256.jsonl"
    ds = datasets.load_dataset("json", data_files=path)
    sub_ds = ds.filter(lambda x: x['stderr'].startswith("fatal: unable to access"))
    skipped_ds = ds.filter(lambda x: not(x['stderr'].startswith("fatal")))
    datasets.concatenate_datasets((
        skipped_ds,
        sub_ds.map(get_diff_multi_threaded_processed, num_proc=NUM_PROC, batch_size=NUM_THREADS, batched=True),
    )).to_json(path.replace(".", "_new."), num_proc=NUM_PROC)
    exit()
    """
    ### END LOAD EXISTING ###
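    # datasets.map spawns NUM_PROC processes and hands each one batches of NUM_THREADS examples,
    # which get_diff_multi_threaded_processed fans out to threads: up to 64 * 64 = 4096 git fetches in flight.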
    def run_multi_processing_threading():
        ds.map(get_diff_multi_threaded_processed, num_proc=NUM_PROC, batch_size=NUM_THREADS, batched=True).to_json(f"diffs_{select_start}_{select_end}.jsonl", num_proc=NUM_PROC)
    # Benchmarking
    #NUM_TRIALS = 1
    #print(f"Timing multithreading + multiprocessing using {NUM_THREADS} threads and {NUM_PROC} processes")
    #time = timeit.timeit(stmt=run_multi_processing_threading, number=NUM_TRIALS)
    #print("Time:", time)
    #with open("mpt.txt", "w") as f:
    #    f.write(str(time))

    # Running
    run_multi_processing_threading()