# json-schema-keywords / extract_keywords.py
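"""Extract positive and negative JSON Schema keyword examples.

Reads schemas as JSON lines from standard input and writes one JSON line
per example to standard output, pairing each subschema with a validation
keyword it does (positive) or does not (negative) use.
"""
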
import collections
import json
import sys

import json5
import jsonpath_ng.ext
import tqdm
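
# Descriptive and metadata keys stripped from every example, since we
# don't expect them to be available at inference time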
REMOVE_KEYS = [
"$comment",
"$id",
"$ref",
"Description",
"description",
"markdownDescription",
"title",
]
KEYWORDS = {
"integer": [
"minimum",
"maximum",
"exclusiveMinimum",
"exclusiveMaximum",
"multipleOf",
],
"object": [
"minProperties",
"maxProperties",
],
"string": [
"minLength",
"maxLength",
"format",
"pattern",
],
"array": [
"minContains",
"maxContains",
"minItems",
"maxItems",
"uniqueItems",
],
}

# JSON Schema calls the general numeric type "number"; it accepts the
# same validation keywords as "integer"
KEYWORDS["number"] = KEYWORDS["integer"]
def find_type_paths(obj, json_type, path=jsonpath_ng.Root()):
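    """Yield a JSONPath for every subschema whose "type" equals json_type.

    Example (illustrative):
    >>> schema = {"type": "object", "properties": {"age": {"type": "integer"}}}
    >>> [str(p) for p in find_type_paths(schema, "integer")]
    ['$.properties.age']
    """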
if isinstance(obj, dict):
for k, v in obj.items():
            # If we have the "type" keyword and its value matches, we found one
if k == "type" and v == json_type:
yield path
# Continue recursively through the object's children
yield from find_type_paths(
v, json_type, jsonpath_ng.Child(path, jsonpath_ng.Fields(k))
)
elif isinstance(obj, list):
# Check each list element
for i, v in enumerate(obj):
yield from find_type_paths(
v, json_type, jsonpath_ng.Child(path, jsonpath_ng.Index(i))
)
def write_obj(schema, obj, path, keyword, is_neg):
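    """Print one JSONL training record for an (object, keyword) pair.

    is_neg marks records where the keyword is absent from the object.
    """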
    # Skip any objects whose pretty-printed JSON is larger than 1 KiB
    if len(json.dumps(obj, indent=4)) <= 1024:
print(
json.dumps(
{
"schema": schema,
"obj": obj,
"path": path,
"keyword": keyword,
"is_neg": is_neg,
}
)
)
def build_obj(path, value):
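    """Illustrative doctest (path parsed with jsonpath_ng.ext):

    >>> build_obj(jsonpath_ng.ext.parse("$.foo.bar[0]"), 42)
    {'foo': {'bar': [42]}}
    """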
# Build an object with the same shape it has in the entire document
# For example, a path of $.foo.bar[0] would create an object
# that looks like {"foo": {"bar": [value]}}
if isinstance(path, jsonpath_ng.Root):
return value
if isinstance(path.right, (jsonpath_ng.Index, jsonpath_ng.Slice)):
return build_obj(path.left, [value])
elif isinstance(path.right, jsonpath_ng.Fields):
return build_obj(path.left, {path.right.fields[0]: value})
else:
return value
def clean_obj(obj, drop_keyword):
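    """Return a copy of obj with drop_keyword removed from its top level.

    Example (illustrative):
    >>> clean_obj({"type": "string", "minLength": 1, "pattern": "^a"}, "minLength")
    {'type': 'string', 'pattern': '^a'}
    """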
    if isinstance(obj, dict):
        # Drop the target keyword at this level so it can't leak into the
        # input; values recurse with None, so nested dicts keep all keys
        return {k: clean_obj(v, None) for k, v in obj.items() if k != drop_keyword}
    elif isinstance(obj, list):
        return [clean_obj(v, drop_keyword) for v in obj]
    else:
        return obj
def get_examples(data):
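    """Collect negative and positive examples for every keyword.

    Returns a dict mapping each keyword to a pair of lists
    (negative, positive); each entry is (rebuilt_object, path_string).
    """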
neg_pos = collections.defaultdict(lambda: ([], []))
for keyword_type, keywords in KEYWORDS.items():
# Find all schema elements with the given type
found_paths = find_type_paths(data, keyword_type)
for found_path in found_paths:
for found_obj in found_path.find(data):
# For each keyword, check if it is included
for keyword in keywords:
# Append to the 1st or 2nd list depending on
# whether the keyword is used in this object
has_keyword = keyword in found_obj.value
                    # Remove the keyword itself since we can't use it to predict
                    obj_without_keyword = clean_obj(found_obj.value, keyword)
# Remove keys we don't expect to have at inference time
for key in REMOVE_KEYS:
obj_without_keyword.pop(key, None)
path_obj = build_obj(found_path, obj_without_keyword)
neg_pos[keyword][has_keyword].append((path_obj, str(found_path)))
return neg_pos
def write_examples(schema, neg_pos):
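    """Print all examples for keywords with at least two of each polarity."""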
for keyword, (neg, pos) in neg_pos.items():
# Only include cases where we have at least
# two positive and negative examples
if len(pos) > 1 and len(neg) > 1:
# Write out the positive and negative examples
for pos_item, path in pos:
write_obj(schema, pos_item, path, keyword, False)
            # Write out all the negative examples
            for neg_item, path in neg:
                write_obj(schema, neg_item, path, keyword, True)
def main():
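    """Read schemas as JSON lines from stdin and print extracted examples.

    Each input line is a JSON object with "name" and "content" fields;
    contents are parsed with json5, which accepts a superset of JSON.
    """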
schemas = []
for line in sys.stdin:
obj = json.loads(line)
schemas.append(obj)
for schema in tqdm.tqdm(schemas):
data = json5.loads(schema["content"])
neg_pos = get_examples(data)
write_examples(schema["name"], neg_pos)
if __name__ == "__main__":
main()