Datasets: Add additional schemas

Changed files:
- test.jsonl.gz +2 -2
- train.jsonl.gz +2 -2
- train_split.py +1 -1
- validate_schemas.py +9 -6
- validation.jsonl.gz +2 -2
test.jsonl.gz (CHANGED)
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:58d2374d03fc37794559df99f336c83b6631e0e565548e7e3b2ada2a72f083b7
+size 2525492
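All three *.jsonl.gz entries are Git LFS pointer stubs, so these diffs record only a new sha256 digest and byte size rather than the data itself (the old digests and sizes are truncated in this view). As a minimal sketch of what those two fields mean, the following hypothetical helpers (parse_pointer and verify are illustrative names, not part of this repo) check a downloaded blob against its pointer:

    import hashlib
    from pathlib import Path

    def parse_pointer(pointer_path):
        # An LFS pointer is three "key value" lines:
        #   version https://git-lfs.github.com/spec/v1
        #   oid sha256:<hex digest>
        #   size <bytes>
        lines = Path(pointer_path).read_text().splitlines()
        fields = dict(line.split(" ", 1) for line in lines if line)
        return fields["oid"].split(":", 1)[1], int(fields["size"])

    def verify(pointer_path, blob_path):
        # Compare the actual file's length and digest against the pointer.
        expected_oid, expected_size = parse_pointer(pointer_path)
        data = Path(blob_path).read_bytes()
        return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

verify("test.jsonl.gz", path_to_downloaded_blob) returning False would indicate a stale or corrupted download relative to this commit.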
train.jsonl.gz (CHANGED)
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:94046ca1146e3160cef4e57b3592b6405f6e6fc6f204abefee45670fdf876051
+size 44357058
train_split.py (CHANGED)
@@ -137,7 +137,7 @@ def main(similarity, split, seed, commits_file):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("--similarity", default=None, type=float)
-    parser.add_argument("--seed", default=
+    parser.add_argument("--seed", default=384, type=int)
     parser.add_argument("--split", default=0.8, type=float)
     parser.add_argument("--commits_file", default="commits.json")
     args = parser.parse_args()
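The only change here is a concrete default for --seed (384, typed as int); the old line is truncated in this view. Presumably the seed drives a deterministic shuffle inside main(similarity, split, seed, commits_file). A sketch of that pattern under that assumption (split_commits is a hypothetical name, not the repo's actual function):

    import random

    def split_commits(commits, split=0.8, seed=384):
        # A locally seeded RNG keeps the shuffle reproducible without
        # touching the global random state.
        rng = random.Random(seed)
        shuffled = list(commits)
        rng.shuffle(shuffled)
        cut = int(len(shuffled) * split)
        return shuffled[:cut], shuffled[cut:]

    train, test = split_commits(range(100))  # same partition on every run

Pinning the default in argparse means runs that omit --seed still produce the same train/test split across machines.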
validate_schemas.py (CHANGED)
@@ -4,29 +4,32 @@ from pathlib import Path
 
 import json5
 import jsonschema
-import tqdm
+from tqdm.contrib.concurrent import process_map
 
 
-data_path = Path("data")
-for schema_file in tqdm.tqdm(list(data_path.rglob("*.json"))):
+def process_file(schema_file):
     # Calculate the path of the new file
     new_schema_file = Path("valid_data", *schema_file.parts[1:])
 
     # Skip any directories named with .json at the end
     if not schema_file.is_file() and not new_schema_file.is_file():
-        continue
+        return
 
     try:
         schema = json5.load(open(schema_file))
     except ValueError:
-        continue
+        return
 
     vcls = jsonschema.validators.validator_for(schema)
     try:
         vcls.check_schema(schema)
     except jsonschema.exceptions.SchemaError:
-        continue
+        return
 
     new_schema_file = Path("valid_data", *schema_file.parts[1:])
     Path.mkdir(new_schema_file.parent, parents=True, exist_ok=True)
     json.dump(schema, open(new_schema_file, "w"), sort_keys=True, indent=2)
+
+if __name__ == "__main__":
+    data_path = Path("data")
+    process_map(process_file, list(data_path.rglob("*.json")), chunksize=10)
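The rewrite lifts the body of the old serial tqdm loop into process_file() so that tqdm's process_map can fan it out across a pool of worker processes, with the loop's continue statements becoming early returns. process_map behaves roughly like multiprocessing.Pool.map with a progress bar attached; a minimal standalone example (square is just a stand-in task, not related to this script):

    from tqdm.contrib.concurrent import process_map

    def square(n):
        return n * n

    if __name__ == "__main__":
        # chunksize batches items per worker dispatch, amortizing
        # inter-process overhead when each task is cheap (like
        # validating one small schema file).
        results = process_map(square, range(1000), chunksize=10)

The __main__ guard matters here: on spawn-based platforms each worker re-imports the module, and unguarded top-level code would try to start pools recursively.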
validation.jsonl.gz (CHANGED)
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:5b00077250cbf3e7a876ae95f7cf1d5ab0b0c01957bf45cbcbebe9fb8ff79fbd
+size 4581789