initial run at creating output tests #619

Merged · 21 commits · Dec 13, 2022

Commits

e9f5400
initial run at creating output tests
gregsdennis Nov 23, 2022
87c184d
add .editorconfig; rearrange folders
gregsdennis Nov 23, 2022
fe25ba9
updated readme; fixed $schema typo; tests for 2020-12
gregsdennis Nov 23, 2022
147df48
add output schemas
gregsdennis Nov 23, 2022
efeee1d
update tests to reference output schemas
gregsdennis Nov 23, 2022
46bc7ac
add readme note about output-schema.json files
gregsdennis Nov 23, 2022
daf42a7
attempt to update ci for output tests
gregsdennis Nov 24, 2022
d263bf0
add blank lines at the end of all the files
gregsdennis Nov 28, 2022
691d039
update $dynamic* value in schemas
gregsdennis Nov 28, 2022
b40a6ca
Update bin/jsonschema_suite
gregsdennis Nov 28, 2022
b126759
Update bin/jsonschema_suite
gregsdennis Nov 28, 2022
dc6e820
Merge remote-tracking branch 'origin/main' into gregsdennis/output-tests
Julian Nov 29, 2022
3413863
Inline the relevant parts of the test schema to output tests.
Julian Nov 29, 2022
a8b2805
Minor style tweaks.
Julian Nov 29, 2022
f5197e0
Fix the output check to ignore output-schema.json.
Julian Nov 29, 2022
930e87e
add clarification on no changes between 2019 and 2020
gregsdennis Nov 30, 2022
1a860cf
added $id to all output schemas; reindexed test cases to 0 (instead o…
gregsdennis Dec 2, 2022
c883552
absoluteKeywordLocation is not required when there is no `$ref`
gregsdennis Dec 2, 2022
8ee4323
add some more detail in readme; add redundant keyword to some tests
gregsdennis Dec 4, 2022
c264401
Blacked.
Julian Nov 29, 2022
b538fe7
Bump the validator version used for suite sanity checks.
Julian Dec 6, 2022
1 change: 1 addition & 0 deletions .editorconfig
@@ -0,0 +1 @@
insert_final_newline = true
41 changes: 35 additions & 6 deletions bin/jsonschema_suite
@@ -30,11 +30,13 @@ else:

ROOT_DIR = Path(__file__).parent.parent
SUITE_ROOT_DIR = ROOT_DIR / "tests"
OUTPUT_ROOT_DIR = ROOT_DIR / "output-tests"

REMOTES_DIR = ROOT_DIR / "remotes"
REMOTES_BASE_URL = "http://localhost:1234/"

TESTSUITE_SCHEMA = json.loads((ROOT_DIR / "test-schema.json").read_text())
OUTPUTTESTSUITE_SCHEMA = json.loads((ROOT_DIR / "output-test-schema.json").read_text())


def files(paths):
@@ -88,12 +90,17 @@ class SanityTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        print(f"Looking for tests in {SUITE_ROOT_DIR}")
        print(f"Looking for output tests in {OUTPUT_ROOT_DIR}")
        print(f"Looking for remotes in {REMOTES_DIR}")

        cls.test_files = list(collect(SUITE_ROOT_DIR))
        assert cls.test_files, "Didn't find the test files!"
        print(f"Found {len(cls.test_files)} test files")

        cls.output_test_files = list(collect(OUTPUT_ROOT_DIR))
        assert cls.output_test_files, "Didn't find the output test files!"
        print(f"Found {len(cls.output_test_files)} output test files")

        cls.remote_files = list(collect(REMOTES_DIR))
        assert cls.remote_files, "Didn't find the remote files!"
        print(f"Found {len(cls.remote_files)} remote files")
@@ -142,6 +149,17 @@ class SanityTests(unittest.TestCase):
                except ValueError as error:
                    self.fail(f"{path} contains invalid JSON ({error})")

    def test_all_output_test_files_are_valid_json(self):
        """
        All output test files contain valid JSON.
        """
        for path in self.output_test_files:
            with self.subTest(path=path):
                try:
                    json.loads(path.read_text())
                except ValueError as error:
                    self.fail(f"{path} contains invalid JSON ({error})")

    def test_all_remote_files_are_valid_json(self):
        """
        All remote files contain valid JSON.
@@ -157,7 +175,7 @@ class SanityTests(unittest.TestCase):
        """
        All cases have reasonably long descriptions.
        """
-        for case in cases(self.test_files):
+        for case in cases(self.test_files + self.output_test_files):
            with self.subTest(description=case["description"]):
                self.assertLess(
                    len(case["description"]),
@@ -169,7 +187,7 @@ class SanityTests(unittest.TestCase):
        """
        All tests have reasonably long descriptions.
        """
-        for count, test in enumerate(tests(self.test_files)):
+        for count, test in enumerate(tests(self.test_files + self.output_test_files)):
            with self.subTest(description=test["description"]):
                self.assertLess(
                    len(test["description"]),
@@ -182,28 +200,28 @@ class SanityTests(unittest.TestCase):
        """
        All cases have unique descriptions in their files.
        """
-        for path, cases in files(self.test_files):
+        for path, cases in files(self.test_files + self.output_test_files):
            with self.subTest(path=path):
                self.assertUnique(case["description"] for case in cases)

    def test_all_test_descriptions_are_unique(self):
        """
        All test cases have unique test descriptions in their tests.
        """
-        for count, case in enumerate(cases(self.test_files)):
+        for count, case in enumerate(cases(self.test_files + self.output_test_files)):
            with self.subTest(description=case["description"]):
                self.assertUnique(
                    test["description"] for test in case["tests"]
                )
        print(f"Found {count} test cases.")

    def test_case_descriptions_do_not_use_modal_verbs(self):
-        for case in cases(self.test_files):
+        for case in cases(self.test_files + self.output_test_files):
            with self.subTest(description=case["description"]):
                self.assertFollowsDescriptionStyle(case["description"])

    def test_test_descriptions_do_not_use_modal_verbs(self):
-        for test in tests(self.test_files):
+        for test in tests(self.test_files + self.output_test_files):
            with self.subTest(description=test["description"]):
                self.assertFollowsDescriptionStyle(test["description"])
@@ -244,6 +262,17 @@ class SanityTests(unittest.TestCase):
                validator.validate(cases)
            except jsonschema.ValidationError as error:
                self.fail(str(error))

    def test_output_suites_are_valid(self):
        """
        All output test files are valid under output-test-schema.json.
        """
        Validator = jsonschema.validators.validator_for(OUTPUTTESTSUITE_SCHEMA)
        validator = Validator(OUTPUTTESTSUITE_SCHEMA)
        for path, cases in files(self.output_test_files):
            with self.subTest(path=path):
                try:
                    validator.validate(cases)
                except jsonschema.ValidationError as error:
                    self.fail(str(error))


def main(arguments):
36 changes: 36 additions & 0 deletions output-test-schema.json
@@ -0,0 +1,36 @@
{
    "$schema": "https://json-schema.org/draft/2020-12/schema",
    "$id": "https://json-schema.org/tests/output-test-schema",
    "description": "A schema for files contained within this suite",

    "$ref": "test-schema",

    "$defs": {
        "test": {
            "$dynamicAnchor": "foo",
            "description": "A single output test",

            "allOf": [
                { "$ref": "test-schema#/$defs/test" },
                {
                    "properties": {
                        "output": {
                            "description": "schemas that are used to verify output",
                            "type": "object",
                            "properties": {
                                "flag": { "$ref": "https://json-schema.org/draft/2020-12/schema" },
                                "basic": { "$ref": "https://json-schema.org/draft/2020-12/schema" },
                                "detailed": { "$ref": "https://json-schema.org/draft/2020-12/schema" },
                                "verbose": { "$ref": "https://json-schema.org/draft/2020-12/schema" },
                                "list": { "$ref": "https://json-schema.org/draft/2020-12/schema" },
                                "hierarchy": { "$ref": "https://json-schema.org/draft/2020-12/schema" }
                            },
                            "minProperties": 1,
                            "additionalProperties": false
                        }
                    }
                }
            ]
        }
    }
}
30 changes: 30 additions & 0 deletions output-tests/README.md
@@ -0,0 +1,30 @@
These tests are intended to validate that implementations are correctly generating output in accordance with the specification.

Output was initially specified with draft 2019-09. It remained unchanged for draft 2020-12, but will receive an update with the next release.

***NOTE** Although the formats didn't change between 2019-09 and 2020-12, the tests are replicated for 2020-12 because the `$schema` is different and implementations may (but shouldn't) produce different output.*

## Organization

The tests are organized by specification release and then into two categories: content and structure.

Content tests verify that the keywords are producing the correct annotations and/or error messages. Since there are no requirements on the content of error messages, there's not much that can be verified for them, but it is possible to identify when an error message _could_ be present. Primarily, these tests need to extensively cover the annotation behaviors of each keyword. The only output format needed for these tests is `basic`.

Structure tests verify that the structures of the various formats (i.e. `flag`, `basic`, `detailed`, `verbose`) are correct. These tests don't need to cover each keyword; rather they need to sufficiently cover the various aspects of building the output structures by using whatever keywords are necessary to do so.
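
For reference, `flag` carries only the boolean result, while `basic` flattens every output unit into a single list. As a rough sketch of the shape (using the draft-next field names that these tests assert on; the error message is a hypothetical placeholder, since messages are implementation-defined):

```json
{
    "valid": false,
    "details": [
        {
            "evaluationPath": "",
            "schemaLocation": "https://json-schema.org/tests/content/draft-next/type/1",
            "instanceLocation": "",
            "errors": {
                "type": "expected a string"
            }
        }
    ]
}
```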

In each release folder, you'll also find an _output-schema.json_ file that contains the schema from the specification repo that describes output for that release. This schema will need to be loaded as the tests reference it.
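
As a rough sketch, a harness built on the Python `jsonschema` library might load and register that schema like this (the release folder name and the `my_evaluate` function are hypothetical stand-ins; adapt them to your implementation):

```python
import json
from pathlib import Path

import jsonschema

release = Path("output-tests/draft2020-12")
output_schema = json.loads((release / "output-schema.json").read_text())

for test_file in (release / "content").glob("*.json"):
    for case in json.loads(test_file.read_text()):
        for test in case["tests"]:
            # `my_evaluate` stands in for the implementation under test and
            # returns its "basic" output for this schema/instance pair.
            produced = my_evaluate(case["schema"], test["data"], format="basic")

            verification = test["output"]["basic"]
            # Register output-schema.json so the `$ref` inside each
            # verification schema resolves to it.
            resolver = jsonschema.RefResolver(
                base_uri=output_schema["$id"],
                referrer=verification,
                store={output_schema["$id"]: output_schema},
            )
            validator = jsonschema.Draft202012Validator(verification, resolver=resolver)
            validator.validate(produced)  # raises if the output is non-compliant
```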

## Test Files

The content of a test file is the same as for the validation tests in `tests/`; however, an `output` property has been added to each test case.

The `output` property itself has a property for each of the output formats, where each value is a schema that compliant output will successfully validate against. For the content tests, only `basic` needs to be present.
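
For example, the draft-next `readOnly` content test pairs a test with a `basic` verification schema roughly like this (a trimmed sketch of this PR's `output-tests/draft-next/content/readOnly.json`):

```json
{
    "description": "readOnly is true",
    "data": 1,
    "output": {
        "basic": {
            "$ref": "/draft/next/output/schema",
            "properties": {
                "details": {
                    "contains": {
                        "properties": {
                            "annotations": {"required": ["readOnly"]}
                        }
                    }
                }
            },
            "required": ["details"]
        }
    }
}
```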

## Contributing

Of course, first and foremost, follow the [Contributing guide](/CONTRIBUTING.md).

When writing test cases, try to keep output validation schemas targeted to verify a single requirement. Where possible (and where it makes sense), create multiple tests to cover multiple requirements. This will help keep the output validation schemas small and increase readability. (It also increases your test count. 😉)

For the content tests, there is also a _general.json_ file that contains tests that do not necessarily pertain to any single keyword.
<!-- This general.json file may be added to the structure tests later, but I haven't gotten to them yet, so I don't know. -->
42 changes: 42 additions & 0 deletions output-tests/draft-next/content/general.json
@@ -0,0 +1,42 @@
[
    {
        "description": "failed validation produces no annotations",
        "schema": {
            "$schema": "https://json-schema.org/draft/next/schema",
            "$id": "https://json-schema.org/tests/content/draft-next/general/1",
            "type": "string",
            "readOnly": true
        },
        "tests": [
            {
                "description": "dropped annotations MAY appear in droppedAnnotations",
                "data": 1,
                "output": {
                    "basic": {
                        "$ref": "/draft/next/output/schema",
                        "properties": {
                            "details": {
                                "contains": {
                                    "properties": {
                                        "evaluationPath": {"const": ""},
                                        "schemaLocation": {"const": "https://json-schema.org/tests/content/draft-next/general/1"},
                                        "instanceLocation": {"const": ""},
                                        "annotations": false,
                                        "droppedAnnotations": {
                                            "properties": {
                                                "readOnly": {"const": true}
                                            },
                                            "required": ["readOnly"]
                                        }
                                    },
                                    "required": ["evaluationPath", "schemaLocation", "instanceLocation"]
                                }
                            }
                        },
                        "required": ["details"]
                    }
                }
            }
        ]
    }
]
40 changes: 40 additions & 0 deletions output-tests/draft-next/content/readOnly.json
@@ -0,0 +1,40 @@
[
    {
        "description": "readOnly generates its value as an annotation",
        "schema": {
            "$schema": "https://json-schema.org/draft/next/schema",
            "$id": "https://json-schema.org/tests/content/draft-next/readOnly/1",
            "readOnly": true
        },
        "tests": [
            {
                "description": "readOnly is true",
                "data": 1,
                "output": {
                    "basic": {
                        "$ref": "/draft/next/output/schema",
                        "properties": {
                            "details": {
                                "contains": {
                                    "properties": {
                                        "evaluationPath": {"const": ""},
                                        "schemaLocation": {"const": "https://json-schema.org/tests/content/draft-next/readOnly/1"},
                                        "instanceLocation": {"const": ""},
                                        "annotations": {
                                            "properties": {
                                                "readOnly": {"const": true}
                                            },
                                            "required": ["readOnly"]
                                        }
                                    },
                                    "required": ["evaluationPath", "schemaLocation", "instanceLocation", "annotations"]
                                }
                            }
                        },
                        "required": ["details"]
                    }
                }
            }
        ]
    }
]
38 changes: 38 additions & 0 deletions output-tests/draft-next/content/type.json
@@ -0,0 +1,38 @@
[
    {
        "description": "incorrect type",
        "schema": {
            "$schema": "https://json-schema.org/draft/next/schema",
            "$id": "https://json-schema.org/tests/content/draft-next/type/1",
            "type": "string"
        },
        "tests": [
            {
                "description": "incorrect type must be reported, but a message is not required",
                "data": 1,
                "output": {
                    "basic": {
                        "$ref": "/draft/next/output/schema",
                        "properties": {
                            "details": {
                                "contains": {
                                    "properties": {
                                        "evaluationPath": {"const": ""},
                                        "schemaLocation": {"const": "https://json-schema.org/tests/content/draft-next/type/1"},
                                        "instanceLocation": {"const": ""},
                                        "annotations": false,
                                        "errors": {
                                            "required": ["type"]
                                        }
                                    },
                                    "required": ["evaluationPath", "schemaLocation", "instanceLocation"]
                                }
                            }
                        },
                        "required": ["details"]
                    }
                }
            }
        ]
    }
]