Datasets:
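The script below takes the crawled files in fetched_data/, keeps only those that parse as JSON (leniently, via json5) and whose schemas pass jsonschema's check_schema for their declared draft, and writes the survivors into a parallel valid_data/ tree: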
import json
import sys
from pathlib import Path

import json5
import jsonschema
from tqdm.contrib.concurrent import process_map
IGNORE_PATHS = [
    "node_modules",
    "site-packages",
    "draft2019-09",
    "draft2020-12",
    "draft-next",
    "vendor",
]
def process_file(schema_file):
    # Calculate the path of the new file under valid_data/
    new_schema_file = Path("valid_data", *schema_file.parts[1:])
    # Skip any directories named with .json at the end
    if not schema_file.is_file() and not new_schema_file.is_file():
        return
    # Skip files in ignored directories
    for path in IGNORE_PATHS:
        if path in schema_file.parts or path in new_schema_file.parts:
            return
    try:
        with open(schema_file) as f:
            schema = json5.load(f)
    except ValueError:
        return
    # Skip meta-schemas (guarding against documents with no "$id")
    if isinstance(schema, dict) and str(schema.get("$id", "")).startswith(
        "https://json-schema.org/"
    ):
        return
    # Validate the schema against the draft it declares
    vcls = jsonschema.validators.validator_for(schema)
    try:
        vcls.check_schema(schema)
    except jsonschema.exceptions.SchemaError:
        return
    new_schema_file.parent.mkdir(parents=True, exist_ok=True)
    with open(new_schema_file, "w") as f:
        json.dump(schema, f, sort_keys=True, indent=2)
if __name__ == "__main__":
    # Increase the recursion limit to handle deeply nested schemas
    sys.setrecursionlimit(10000)
    data_path = Path("fetched_data")
    process_map(process_file, list(data_path.rglob("*.json")), chunksize=10)
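After a run, you can sanity-check the output with a few lines (a minimal sketch, not part of the original script; it only assumes the valid_data/ layout produced above):

import json
from pathlib import Path

# Count the schemas that survived filtering and show one example path.
valid = sorted(Path("valid_data").rglob("*.json"))
print(f"{len(valid)} schemas passed validation")
if valid:
    print("example:", valid[0])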