Muennighoff committed
Commit 4ef8025 · Parent(s): d492aea

Update multi_threaded_processed.py

Files changed (1)
  1. multi_threaded_processed.py +31 -27
multi_threaded_processed.py CHANGED
@@ -19,9 +19,9 @@ git checkout FETCH_HEAD^ -- README.md
 # In the multiprocessing case, the below leads to each process creating the same directory
 # random = random.Random(42) # make it reproducible
 
-NUM_THREADS = 16
+NUM_THREADS = 64
 NUM_PROC = 64
-DEBUG_SIZE = 4096
+# DEBUG_SIZE = 1024
 
 CWD = os.getcwd()
 
@@ -35,6 +35,7 @@ def get_file_contents(commit, old_file, new_file, repo, cwd=None):
     completed = run_in_shell("git fetch --depth 2 origin " + commit, cwd=cwd)
     # If it requires authentication
     if completed.returncode != 0:
+        #print("ERRORC1", completed)
         return ("", "")
     # Optionally do git diff at the same time (Saving code needs to be added)
     # git_diff = run_in_shell(f"git diff {commit}^ {commit}", cwd=cwd).stdout.decode(errors='ignore')
@@ -43,6 +44,7 @@ def get_file_contents(commit, old_file, new_file, repo, cwd=None):
     completed = run_in_shell("git checkout FETCH_HEAD^ -- " + old_file, cwd=cwd)
     # If there's only a new file, but no old file
     if completed.returncode != 0:
+        #print("ERRORC2", completed)
         return (new_contents, "")
     old_contents = run_in_shell("cat " + old_file, cwd=cwd).stdout.decode(errors='ignore')
     return (new_contents, old_contents)
@@ -61,6 +63,7 @@ def get_diff(ex):
         try:
             new_contents, old_contents = get_file_contents(commit_id, old_file, new_file, repo, cwd=random_dir)
         except Exception as e:
+            #print("ERROR", commit_id, old_file, new_file, repo, str(random_dir), e)
             # Break in case of many repos that all lead us nowhere
             if i > 10:
                 break
@@ -83,11 +86,11 @@ def get_diff_multi_threaded_processed(batch):
     return {k: [dic[k] for dic in results] for k in results[0]}
 
 if __name__ == "__main__":
-    # git clone bigcode/github-commits
+    # git clone https://huggingface.co/datasets/bigcode/github-commits
    ds = datasets.load_dataset("./github-commits", use_auth_token=True)["train"]
 
     ### OPTIONAL FILTERING ###
-    """
+    #"""
     java = [".java"]
     javascript = [
         ".js",
@@ -126,46 +129,47 @@ if __name__ == "__main__":
         extensions = json.load(f)
     suffices = [suffix for suffices in extensions.values() for suffix in suffices]
     def filter_extension(ex):
-        splits = ex["new_file"].split(".")
-        if len(splits) == 1: return False
-        return "." + splits[-1] in suffices
+        return ex["new_file"].endswith(suffices)
+
     def filter_extension_python(ex):
-        splits = ex["new_file"].split(".")
-        if len(splits) == 1: return False
-        splits = ex["new_file"].split(".")
-        return "." + splits[-1] in python
+        return ex["new_file"].endswith(python)
+
     def filter_update(ex):
         return ex["message"] != "Update " + ex["old_file"]
 
-    filter_msg = ["initial commit", "please\n", "please"]
+    filter_msg = ["initial commit", "please\n", "please", "lalala"]
 
     def filter_misc(ex):
         return ex["message"] not in filter_msg
+
     # Removes ~10M
     ds = ds.filter(filter_extension, num_proc=NUM_PROC)
     print("After Extension filter", len(ds))
     # Removes ~1M
     ds = ds.filter(filter_update, num_proc=NUM_PROC)
     print("After Update filter", len(ds))
-    ds = ds.filter(filter_extension_python, num_proc=NUM_PROC)
-    print("After Python filter", len(ds))
+    #ds = ds.filter(filter_extension_python, num_proc=NUM_PROC)
+    #print("After Python filter", len(ds))
     ds = ds.filter(filter_misc, num_proc=NUM_PROC)
     print("After Misc filter", len(ds))
-    ds = ds.select(range(DEBUG_SIZE))
-    """
+    #ds = ds.select(range(DEBUG_SIZE))
+    START = 0 # Modify for each instance (0 - 7)
+    samples_per_instance = 64 * 64 * 64 * 32 # 8_388_608
+    select_start = START * samples_per_instance
+    select_end = START * samples_per_instance + samples_per_instance
+    ds = ds.select(range(select_start, select_end))
+    print(f"Going from {select_start} till {select_end}")
+
+    #"""
     ### END FILTERING ###
 
     def run_multi_processing_threading():
         ds.map(get_diff_multi_threaded_processed, num_proc=NUM_PROC, batch_size=NUM_THREADS, batched=True).to_json("mpt.jsonl")
 
-    NUM_TRIALS = 1
-    print(f"Timing multithreading + multiprocessing using {NUM_THREADS} threads and {NUM_PROC} processes")
-    time = timeit.timeit(stmt=run_multi_processing_threading, number=NUM_TRIALS)
-    print("Time:", time)
-    with open("mpt.txt", "w") as f:
-        f.write(str(time))
-
-    #run_multi_processing_threading()
-
-
-
+    #NUM_TRIALS = 1
+    #print(f"Timing multithreading + multiprocessing using {NUM_THREADS} threads and {NUM_PROC} processes")
+    #time = timeit.timeit(stmt=run_multi_processing_threading, number=NUM_TRIALS)
+    #print("Time:", time)
+    #with open("mpt.txt", "w") as f:
+    #    f.write(str(time))
+    run_multi_processing_threading()
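
The debug prints added in this commit (ERRORC1, ERRORC2) sit on the two failure paths of get_file_contents: the shallow fetch of the commit and the checkout of the parent version of the file. A minimal, self-contained sketch of that retrieval flow, using subprocess directly in place of the script's run_in_shell helper; get_old_and_new and repo_dir are illustrative names, not part of the script:

import subprocess

def run_in_shell(cmd, cwd=None):
    # Simplified stand-in for the script's run_in_shell helper
    return subprocess.run(cmd, shell=True, cwd=cwd, capture_output=True)

def get_old_and_new(commit, path, repo_dir):
    # Fetch only the commit and its parent (depth 2), then check out the file
    # at the commit (FETCH_HEAD) and at its parent (FETCH_HEAD^).
    if run_in_shell(f"git fetch --depth 2 origin {commit}", cwd=repo_dir).returncode != 0:
        return ("", "")  # e.g. the repo requires authentication
    run_in_shell(f"git checkout FETCH_HEAD -- {path}", cwd=repo_dir)
    new = run_in_shell(f"cat {path}", cwd=repo_dir).stdout.decode(errors="ignore")
    if run_in_shell(f"git checkout FETCH_HEAD^ -- {path}", cwd=repo_dir).returncode != 0:
        return (new, "")  # file was newly added in this commit, so there is no old version
    old = run_in_shell(f"cat {path}", cwd=repo_dir).stdout.decode(errors="ignore")
    return (new, old)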
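
One caveat about the rewritten extension filters: str.endswith accepts a single string or a tuple of strings and raises TypeError for a list, while suffices and python are built as lists earlier in the script, so the new one-liners likely need a tuple(...) conversion to run. A small sketch of the tuple-based variant; the sample suffices list below is illustrative:

# str.endswith() takes a str or a tuple of str, not a list.
suffices = [".py", ".java", ".js"]  # stand-in for the list built from the extensions JSON

def filter_extension(ex):
    # Converting the list to a tuple avoids the TypeError
    return ex["new_file"].endswith(tuple(suffices))

print(filter_extension({"new_file": "src/main.py"}))  # True
print(filter_extension({"new_file": "notes.txt"}))    # False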
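
The new selection block replaces the DEBUG_SIZE subset with per-instance sharding: each machine sets a different START (0 through 7) and takes a contiguous slice of samples_per_instance = 64 * 64 * 64 * 32 = 8,388,608 rows. A quick sketch of the resulting slice boundaries; the loop over eight instances is only for illustration:

samples_per_instance = 64 * 64 * 64 * 32  # 8_388_608 rows per instance

for start in range(8):  # START is hard-coded per machine in the script
    select_start = start * samples_per_instance
    select_end = select_start + samples_per_instance
    print(f"Instance {start}: rows [{select_start}, {select_end})")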
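
run_multi_processing_threading combines two levels of parallelism: datasets.map spawns NUM_PROC worker processes, and batch_size=NUM_THREADS hands each worker a batch of NUM_THREADS rows that the batched function can fan out to a thread pool (the git subprocess calls are I/O-bound, so threads overlap them well). A rough sketch of that pattern, with a placeholder per-row function standing in for the script's get_diff:

from concurrent.futures import ThreadPoolExecutor

NUM_PROC = 64     # worker processes for datasets.map
NUM_THREADS = 64  # rows per batch == threads per worker process

def process_row(ex):
    # Placeholder for the per-row, I/O-bound work done by get_diff
    return {"new_file": ex["new_file"].lower()}

def process_batch_threaded(batch):
    # Turn the columnar batch into rows, run them on a thread pool,
    # then reassemble the column-oriented dict that datasets.map expects.
    rows = [dict(zip(batch, values)) for values in zip(*batch.values())]
    with ThreadPoolExecutor(max_workers=NUM_THREADS) as pool:
        results = list(pool.map(process_row, rows))
    return {k: [r[k] for r in results] for k in results[0]}

# Usage, assuming `ds` is a datasets.Dataset:
# ds.map(process_batch_threaded, num_proc=NUM_PROC, batch_size=NUM_THREADS, batched=True)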