diff --git a/Makefile b/Makefile index 60bcfbc..9043adc 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ upgrade-submodules: git submodule update --remote --init --recursive # TODO: Expand to lib/ and tests/ as linting issues are resolved. -RUFF_TARGET = lib/pyld/context_resolver.py lib/pyld/identifier_issuer.py lib/pyld/iri_resolver.py lib/pyld/nquads.py lib/pyld/resolved_context.py tests/test_document_loader.py +RUFF_TARGET = lib/pyld/context_resolver.py lib/pyld/identifier_issuer.py lib/pyld/iri_resolver.py lib/pyld/nquads.py lib/pyld/resolved_context.py tests/*.py lint: ruff check $(RUFF_TARGET) diff --git a/requirements-test.txt b/requirements-test.txt index a61377f..28a66ea 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -1,3 +1,4 @@ ruff pytest pytest-cov +typing_extensions diff --git a/tests/conftest.py b/tests/conftest.py index 46bac72..bc15e24 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,5 +1,7 @@ import os import unittest +from contextlib import suppress + import pytest # Import the existing test runner module so we can reuse Manifest/Test @@ -10,10 +12,23 @@ def pytest_addoption(parser): # Do only long options for pytest integration; pytest reserves # lowercase single-letter short options for its own CLI flags. 
- parser.addoption('--tests', nargs='*', default=[], help='A manifest or directory to test') - parser.addoption('--earl', dest='earl', help='The filename to write an EARL report to') - parser.addoption('--loader', dest='loader', default='requests', help='The remote URL document loader: requests, aiohttp') - parser.addoption('--number', dest='number', help='Limit tests to those containing the specified test identifier') + parser.addoption( + '--tests', nargs='*', default=[], help='A manifest or directory to test' + ) + parser.addoption( + '--earl', dest='earl', help='The filename to write an EARL report to' + ) + parser.addoption( + '--loader', + dest='loader', + default='requests', + help='The remote URL document loader: requests, aiohttp', + ) + parser.addoption( + '--number', + dest='number', + help='Limit tests to those containing the specified test identifier', + ) def pytest_configure(config): @@ -26,9 +41,13 @@ def pytest_configure(config): # existing `runtests` helpers behave the same as the CLI runner. 
loader = config.getoption('loader') if loader == 'requests': - runtests.jsonld._default_document_loader = runtests.jsonld.requests_document_loader() + runtests.jsonld._default_document_loader = ( + runtests.jsonld.requests_document_loader() + ) elif loader == 'aiohttp': - runtests.jsonld._default_document_loader = runtests.jsonld.aiohttp_document_loader() + runtests.jsonld._default_document_loader = ( + runtests.jsonld.aiohttp_document_loader() + ) number = config.getoption('number') if number: @@ -85,7 +104,7 @@ def pytest_generate_tests(metafunc): 'description': 'Top level PyLD test manifest', 'name': 'PyLD', 'sequence': [], - 'filename': '/' + 'filename': '/', } for test in test_targets: @@ -99,7 +118,7 @@ def pytest_generate_tests(metafunc): filename = os.path.join(test, 'manifest.jsonld') if os.path.exists(filename): root_manifest['sequence'].append(os.path.abspath(filename)) - + # Use the existing Manifest loader to create a TestSuite and flatten it suite = runtests.Manifest(root_manifest, root_manifest['filename']).load() tests = list(_flatten_suite(suite)) @@ -114,7 +133,7 @@ def pytest_runtest_makereport(item): # Hookwrapper gives us the final test report via `outcome.get_result()`. outcome = yield rep = outcome.get_result() - + # We only handle the main call phase to match # the behaviour of the original runner which only reported passes # and failures/errors. @@ -139,12 +158,11 @@ def pytest_runtest_makereport(item): if rep.outcome == 'skipped': return - success = (rep.outcome == 'passed') - try: + success = rep.outcome == 'passed' + + # Don't let EARL bookkeeping break test execution; be quiet on error. + with suppress(Exception): earl_report.add_assertion(manifest_test, success) - except Exception: - # Don't let EARL bookkeeping break test execution; be quiet on error. 
- pass def pytest_sessionfinish(session, exitstatus): diff --git a/tests/runtests.py b/tests/runtests.py index b727dc7..68354c7 100644 --- a/tests/runtests.py +++ b/tests/runtests.py @@ -57,10 +57,11 @@ import datetime import json import os +import re import sys import traceback import unittest -import re + # NOTE: ArgumentParser and TextTestResult were used by the original # TestRunner / EarlTestResult classes. They are obsolete because # pytest now provides the test harness; these imports can be removed @@ -68,8 +69,10 @@ from argparse import ArgumentParser from unittest import TextTestResult +from typing_extensions import override + sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'lib')) -from pyld import jsonld, iri_resolver +from pyld import iri_resolver, jsonld __copyright__ = 'Copyright (c) 2011-2013 Digital Bazaar, Inc.' __license__ = 'New BSD license' @@ -87,7 +90,7 @@ LOCAL_BASES = [ 'https://w3c.github.io/json-ld-api/tests', 'https://w3c.github.io/json-ld-framing/tests', - 'https://github.com/json-ld/normalization/tests' + 'https://github.com/json-ld/normalization/tests', ] SPEC_DIRS = [ @@ -100,14 +103,14 @@ # provides the test harness; this class can be removed once the legacy # CLI runner is deleted. + class TestRunner(unittest.TextTestRunner): """ Loads test manifests and runs tests. """ def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1): - unittest.TextTestRunner.__init__( - self, stream, descriptions, verbosity) + unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity) # The runner uses an ArgumentParser to accept a list of manifests or # test directories and several runner-specific flags (e.g. 
which @@ -115,6 +118,7 @@ def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1): self.options = {} self.parser = ArgumentParser() + @override def _makeResult(self): return EarlTestResult(self.stream, self.descriptions, self.verbosity) @@ -123,22 +127,42 @@ def main(self): print('Use -h or --help to view options.\\n') # add program options - self.parser.add_argument('tests', metavar='TEST', nargs='*', - help='A manifest or directory to test') - self.parser.add_argument('-e', '--earl', dest='earl', - help='The filename to write an EARL report to') - self.parser.add_argument('-b', '--bail', dest='bail', - action='store_true', default=False, - help='Bail out as soon as any test fails') - self.parser.add_argument('-l', '--loader', dest='loader', + self.parser.add_argument( + 'tests', metavar='TEST', nargs='*', help='A manifest or directory to test' + ) + self.parser.add_argument( + '-e', '--earl', dest='earl', help='The filename to write an EARL report to' + ) + self.parser.add_argument( + '-b', + '--bail', + dest='bail', + action='store_true', + default=False, + help='Bail out as soon as any test fails', + ) + self.parser.add_argument( + '-l', + '--loader', + dest='loader', default='requests', help='The remote URL document loader: requests, aiohttp ' - '[default: %(default)s]') - self.parser.add_argument('-n', '--number', dest='number', - help='Limit tests to those containing the specified test identifier') - self.parser.add_argument('-v', '--verbose', dest='verbose', - action='store_true', default=False, - help='Print verbose test data') + '[default: %(default)s]', + ) + self.parser.add_argument( + '-n', + '--number', + dest='number', + help='Limit tests to those containing the specified test identifier', + ) + self.parser.add_argument( + '-v', + '--verbose', + dest='verbose', + action='store_true', + default=False, + help='Print verbose test data', + ) # parse command line args self.options = self.parser.parse_args() @@ -160,7 +184,7 @@ def 
main(self): # Global for saving test numbers to focus on global ONLY_IDENTIFIER if self.options.number: - ONLY_IDENTIFIER = self.options.number + ONLY_IDENTIFIER = self.options.number if len(self.options.tests): # tests given on command line @@ -187,14 +211,14 @@ def main(self): 'description': 'Top level PyLD test manifest', 'name': 'PyLD', 'sequence': [], - 'filename': '/' + 'filename': '/', } for test in test_targets: if os.path.isfile(test): root, ext = os.path.splitext(test) if ext in ['.json', '.jsonld']: root_manifest['sequence'].append(os.path.abspath(test)) - #root_manifest['sequence'].append(test) + # root_manifest['sequence'].append(test) else: raise Exception('Unknown test file ext', root, ext) elif os.path.isdir(test): @@ -208,7 +232,7 @@ def main(self): # load root manifest global ROOT_MANIFEST_DIR - #ROOT_MANIFEST_DIR = os.path.dirname(root_manifest['filename']) + # ROOT_MANIFEST_DIR = os.path.dirname(root_manifest['filename']) ROOT_MANIFEST_DIR = root_manifest['filename'] # Build a Manifest object from the root manifest structure. The # Manifest will recursively load manifests and produce a @@ -221,7 +245,7 @@ def main(self): # output earl report if specified if self.options.earl: filename = os.path.abspath(self.options.earl) - print('Writing EARL report to: %s' % filename) + print(f'Writing EARL report to: {filename}') result.writeReport(filename) if not result.wasSuccessful(): @@ -258,7 +282,7 @@ def load(self): # entry is another manifest if is_jsonld_type(entry, 'mf:Manifest'): self.suite = unittest.TestSuite( - [self.suite, Manifest(entry, filename).load()]) + [self.suite, Manifest(entry, filename).load()] + ) # If the entry is itself a manifest, recurse into it and # append its TestSuite. 
This mirrors the structure of the # W3C test manifests where manifests can include other @@ -282,9 +307,10 @@ class Test(unittest.TestCase): # used to distinguish positive/negative/syntax tests so the # runner knows whether an exception is the expected outcome. """ + def __init__(self, manifest, data, filename): unittest.TestCase.__init__(self) - #self.maxDiff = None + # self.maxDiff = None self.manifest = manifest self.data = data self.filename = filename @@ -295,19 +321,17 @@ def __init__(self, manifest, data, filename): self.test_type = None self.pending = False global TEST_TYPES - for t in TEST_TYPES.keys(): + for t in TEST_TYPES: if is_jsonld_type(data, t): self.test_type = t break def __str__(self): - manifest = self.manifest.data.get( - 'name', self.manifest.data.get('label')) + manifest = self.manifest.data.get('name', self.manifest.data.get('label')) test_id = self.data.get('id', self.data.get('@id')) - label = self.data.get( - 'purpose', self.data.get('name', self.data.get('label'))) + label = self.data.get('purpose', self.data.get('name', self.data.get('label'))) - return ('%s: %s: %s' % (manifest, test_id, label)) + return f'{manifest}: {test_id}: {label}' def _get_expect_property(self): '''Find the expected output property or raise error.''' @@ -325,6 +349,7 @@ def _get_expect_error_code_property(self): else: raise Exception('No expectErrorCode property found') + @override def setUp(self): data = self.data manifest = self.manifest @@ -334,7 +359,7 @@ def setUp(self): types.extend(get_jsonld_values(data, '@type')) types.extend(get_jsonld_values(data, 'type')) if self.test_type is None or self.test_type in SKIP_TESTS: - self.skipTest('Test type of %s' % types) + self.skipTest(f'Test type of {types}') global TEST_TYPES test_info = TEST_TYPES[self.test_type] @@ -342,8 +367,10 @@ def setUp(self): # expand @id and input base if 'baseIri' in manifest.data: data['@id'] = ( - manifest.data['baseIri'] + - os.path.basename(str.replace(manifest.filename, 
'.jsonld', '')) + data['@id']) + manifest.data['baseIri'] + + os.path.basename(str.replace(manifest.filename, '.jsonld', '')) + + data['@id'] + ) self.base = self.manifest.data['baseIri'] + data['input'] # When manifests define a `baseIri` the runner patches the test @@ -354,32 +381,31 @@ def setUp(self): skip_id_re = test_info.get('skip', {}).get('idRegex', []) for regex in skip_id_re: if re.match(regex, data.get('@id', data.get('id', ''))): - self.skipTest('Test with id regex %s' % regex) + self.skipTest(f'Test with id regex {regex}') # mark tests as pending, meaning that they are expected to fail pending_id_re = test_info.get('pending', {}).get('idRegex', []) for regex in pending_id_re: if re.match(regex, data.get('@id', data.get('id', ''))): - self.pending = 'Test with id regex %s' % regex + self.pending = f'Test with id regex {regex}' # skip based on description regular expression - skip_description_re = test_info.get('skip', {}).get( - 'descriptionRegex', []) + skip_description_re = test_info.get('skip', {}).get('descriptionRegex', []) for regex in skip_description_re: if re.match(regex, data.get('description', '')): - self.skipTest('Test with description regex %s' % regex) + self.skipTest(f'Test with description regex {regex}') # skip based on processingMode skip_pm = test_info.get('skip', {}).get('processingMode', []) data_pm = data.get('option', {}).get('processingMode', None) if data_pm in skip_pm: - self.skipTest('Test with processingMode %s' % data_pm) + self.skipTest(f'Test with processingMode {data_pm}') # skip based on specVersion skip_sv = test_info.get('skip', {}).get('specVersion', []) data_sv = data.get('option', {}).get('specVersion', None) if data_sv in skip_sv: - self.skipTest('Test with specVersion %s' % data_sv) + self.skipTest(f'Test with specVersion {data_sv}') # mark tests to run with local loader run_remote_re = test_info.get('runLocal', []) @@ -392,6 +418,7 @@ def setUp(self): # for reproducing the official test-suite behavior without 
network # access. + @override def runTest(self): data = self.data global TEST_TYPES @@ -427,16 +454,22 @@ def runTest(self): self.assertTrue(True) elif self.test_type == 'jld:ToRDFTest': # Test normalized results - result = jsonld.normalize(result, { - 'algorithm': 'URGNA2012', - 'inputFormat': 'application/n-quads', - 'format': 'application/n-quads' - }) - expect = jsonld.normalize(expect, { - 'algorithm': 'URGNA2012', - 'inputFormat': 'application/n-quads', - 'format': 'application/n-quads' - }) + result = jsonld.normalize( + result, + { + 'algorithm': 'URGNA2012', + 'inputFormat': 'application/n-quads', + 'format': 'application/n-quads', + }, + ) + expect = jsonld.normalize( + expect, + { + 'algorithm': 'URGNA2012', + 'inputFormat': 'application/n-quads', + 'format': 'application/n-quads', + }, + ) if result == expect: self.assertTrue(True) else: @@ -445,7 +478,7 @@ def runTest(self): raise AssertionError('results differ') elif not self.is_negative: # Perform order-independent equivalence test - if equalUnordered(result, expect): + if equal_unordered(result, expect): self.assertTrue(True) else: print('\nEXPECTED: ', json.dumps(expect, indent=2)) @@ -457,15 +490,13 @@ def runTest(self): raise AssertionError('pending positive test passed') except AssertionError as e: if e.args[0] == 'pending positive test passed': - print(e) - raise e + print(e) + raise e elif not self.is_negative and not self.pending: print('\nEXPECTED: ', json.dumps(expect, indent=2)) print('ACTUAL: ', json.dumps(result, indent=2)) raise e - elif not self.is_negative: - print('pending') - elif self.is_negative and self.pending: + elif not self.is_negative or (self.is_negative and self.pending): print('pending') else: raise e @@ -477,29 +508,31 @@ def runTest(self): result = get_jsonld_error_code(e) if self.pending and result == expect: print('pending negative test passed') - raise AssertionError('pending negative test passed') + raise AssertionError('pending negative test passed') from e elif 
self.pending: print('pending') else: - #import pdb; pdb.set_trace() + # import pdb; pdb.set_trace() self.assertEqual(result, expect) + # Compare values with order-insensitive array tests -def equalUnordered(result, expect): +def equal_unordered(result, expect): """ `equalUnordered` implements a simple structural equivalence check that ignores ordering in lists. It is used to compare JSON-LD results where arrays are considered unordered by the test-suite semantics. """ if isinstance(result, list) and isinstance(expect, list): - return(len(result) == len(expect) and - all(any(equalUnordered(v1, v2) for v2 in expect) for v1 in result)) + return len(result) == len(expect) and all( + any(equal_unordered(v1, v2) for v2 in expect) for v1 in result + ) elif isinstance(result, dict) and isinstance(expect, dict): - return(len(result) == len(expect) and - all(k in expect and equalUnordered(v, expect[k]) for k, v in result.items())) + return len(result) == len(expect) and all( + k in expect and equal_unordered(v, expect[k]) for k, v in result.items() + ) else: - return(result == expect) - + return result == expect def is_jsonld_type(node, type_): @@ -559,18 +592,11 @@ def read_json(filename): def read_file(filename): - """Read a file and return its contents as text. - - This wrapper ensures consistent text handling across Python 2/3 by - decoding bytes for older Python versions. In the current project we - expect Python 3, but the compatibility guard is kept to match the - original test-runner behavior. + """ + Read a file and return its contents as text. """ with open(filename) as f: - if sys.version_info[0] >= 3: - return f.read() - else: - return f.read().decode('utf8') + return f.read() def read_test_url(property): @@ -582,6 +608,7 @@ def read_test_url(property): accepts a `Test` instance and returns the fully-resolved URL (or `None` if the property is missing). 
""" + def read(test): if property not in test.data: return None @@ -589,6 +616,7 @@ def read(test): return test.manifest.data['baseIri'] + test.data[property] else: return test.data[property] + return read @@ -602,6 +630,7 @@ def read_test_property(property): `.jsonld` it is parsed as JSON; otherwise the raw file contents are returned. """ + def read(test): if property not in test.data: return None @@ -610,6 +639,7 @@ def read(test): return read_json(filename) else: return read_file(filename) + return read @@ -623,6 +653,7 @@ def create_test_options(opts=None): passed to the factory, wires in the test-specific `documentLoader`, and resolves `expandContext` files when present. """ + def create(test): http_options = ['contentType', 'httpLink', 'httpStatus', 'redirectTo'] test_options = test.data.get('option', {}) @@ -636,6 +667,7 @@ def create(test): filename = os.path.join(test.dirname, options['expandContext']) options['expandContext'] = read_json(filename) return options + return create @@ -650,20 +682,18 @@ def create_document_loader(test): """ loader = jsonld.get_document_loader() - - def is_test_suite_url(url): return any(url.startswith(base) for base in LOCAL_BASES) def strip_base(url): for base in LOCAL_BASES: if url.startswith(base): - return url[len(base):] + return url[len(base) :] raise Exception('unkonwn base') def strip_fragment(url): if '#' in url: - return url[:url.index('#')] + return url[: url.index('#')] else: return url @@ -684,30 +714,37 @@ def load_locally(url): 'contentType': content_type, 'contextUrl': None, 'documentUrl': url, - 'document': None + 'document': None, } if options and url == test.base: - if ('redirectTo' in options and options.get('httpStatus') >= 300): + if 'redirectTo' in options and options.get('httpStatus') >= 300: doc['documentUrl'] = ( - test.manifest.data['baseIri'] + options['redirectTo']) + test.manifest.data['baseIri'] + options['redirectTo'] + ) elif 'httpLink' in options: link_header = options.get('httpLink', '') if 
isinstance(link_header, list): link_header = ','.join(link_header) - linked_context = jsonld.parse_link_header( - link_header).get('http://www.w3.org/ns/json-ld#context') + linked_context = jsonld.parse_link_header(link_header).get( + 'http://www.w3.org/ns/json-ld#context' + ) if linked_context and content_type != 'application/ld+json': if isinstance(linked_context, list): raise Exception('multiple context link headers') doc['contextUrl'] = linked_context['target'] - linked_alternate = jsonld.parse_link_header( - link_header).get('alternate') + linked_alternate = jsonld.parse_link_header(link_header).get( + 'alternate' + ) # if not JSON-LD, alternate may point there - if (linked_alternate and - linked_alternate.get('type') == 'application/ld+json' and - not re.match(r'^application\/(\w*\+)?json$', content_type)): + if ( + linked_alternate + and linked_alternate.get('type') == 'application/ld+json' + and not re.match(r'^application\/(\w*\+)?json$', content_type) + ): doc['contentType'] = 'application/ld+json' - doc['documentUrl'] = iri_resolver.resolve(linked_alternate['target'], url) + doc['documentUrl'] = iri_resolver.resolve( + linked_alternate['target'], url + ) global ROOT_MANIFEST_DIR if doc['documentUrl'].find(':') == -1: filename = os.path.join(ROOT_MANIFEST_DIR, doc['documentUrl']) @@ -723,8 +760,9 @@ def load_locally(url): def local_loader(url, headers): # always load remote-doc tests remotely # (some skipped due to lack of reasonable HTTP header support) - if (test.manifest.data.get('name') == 'Remote document' and - not test.data.get('runLocal')): + if test.manifest.data.get('name') == 'Remote document' and not test.data.get( + 'runLocal' + ): return loader(url) # always load non-base tests remotely @@ -736,6 +774,7 @@ def local_loader(url, headers): return local_loader + # NOTE: The EarlTestResult class can be removed because pytest now # provides the test harness; this class can be removed once the legacy # CLI runner is deleted. 
@@ -748,27 +787,32 @@ class EarlTestResult(TextTestResult): `EarlReport` instance so a machine-readable report can be emitted at the end of a test run. """ + def __init__(self, stream, descriptions, verbosity): TextTestResult.__init__(self, stream, descriptions, verbosity) self.report = EarlReport() + @override def addError(self, test, err): TextTestResult.addError(self, test, err) self.report.add_assertion(test, False) + @override def addFailure(self, test, err): TextTestResult.addFailure(self, test, err) self.report.add_assertion(test, False) + @override def addSuccess(self, test): TextTestResult.addSuccess(self, test) self.report.add_assertion(test, True) + @override def writeReport(self, filename): self.report.write(filename) -class EarlReport(): +class EarlReport: """ Generates an EARL report. """ @@ -776,8 +820,9 @@ class EarlReport(): def __init__(self): # Load package metadata (version) from the library's __about__.py about = {} - with open(os.path.join( - os.path.dirname(__file__), '..', 'lib', 'pyld', '__about__.py')) as fp: + with open( + os.path.join(os.path.dirname(__file__), '..', 'lib', 'pyld', '__about__.py') + ) as fp: exec(fp.read(), about) # Timestamp used for test results self.now = datetime.datetime.utcnow().replace(microsecond=0) @@ -801,14 +846,10 @@ def __init__(self): 'earl:test': {'@type': '@id'}, 'earl:outcome': {'@type': '@id'}, 'dc:date': {'@type': 'xsd:date'}, - 'doap:created': {'@type': 'xsd:date'} + 'doap:created': {'@type': 'xsd:date'}, }, '@id': 'https://github.com/digitalbazaar/pyld', - '@type': [ - 'doap:Project', - 'earl:TestSubject', - 'earl:Software' - ], + '@type': ['doap:Project', 'earl:TestSubject', 'earl:Software'], 'doap:name': 'PyLD', 'dc:title': 'PyLD', 'doap:homepage': 'https://github.com/digitalbazaar/pyld', @@ -818,35 +859,34 @@ def __init__(self): 'dc:creator': 'https://github.com/dlongley', 'doap:developer': { '@id': 'https://github.com/dlongley', - '@type': [ - 'foaf:Person', - 'earl:Assertor' - ], + '@type': 
['foaf:Person', 'earl:Assertor'], 'foaf:name': 'Dave Longley', - 'foaf:homepage': 'https://github.com/dlongley' + 'foaf:homepage': 'https://github.com/dlongley', }, 'doap:release': { 'doap:name': 'PyLD ' + about['__version__'], 'doap:revision': about['__version__'], - 'doap:created': self.now.strftime('%Y-%m-%d') + 'doap:created': self.now.strftime('%Y-%m-%d'), }, - 'subjectOf': [] + 'subjectOf': [], } def add_assertion(self, test, success): # Append an EARL assertion describing a single test outcome. The # `earl:outcome` is either `earl:passed` or `earl:failed`. - self.report['subjectOf'].append({ - '@type': 'earl:Assertion', - 'earl:assertedBy': self.report['doap:developer']['@id'], - 'earl:mode': 'earl:automatic', - 'earl:test': test.data.get('id', test.data.get('@id')), - 'earl:result': { - '@type': 'earl:TestResult', - 'dc:date': self.now.isoformat() + 'Z', - 'earl:outcome': 'earl:passed' if success else 'earl:failed' + self.report['subjectOf'].append( + { + '@type': 'earl:Assertion', + 'earl:assertedBy': self.report['doap:developer']['@id'], + 'earl:mode': 'earl:automatic', + 'earl:test': test.data.get('id', test.data.get('@id')), + 'earl:result': { + '@type': 'earl:TestResult', + 'dc:date': self.now.isoformat() + 'Z', + 'earl:outcome': 'earl:passed' if success else 'earl:failed', + }, } - }) + ) return self def write(self, filename): @@ -870,14 +910,14 @@ def write(self, filename): '.*compact-manifest#tm023$', '.*compact-manifest#t0113$', '.*compact-manifest#tc028$', - ] + ], }, 'fn': 'compact', 'params': [ read_test_url('input'), read_test_property('context'), - create_test_options() - ] + create_test_options(), + ], }, 'jld:ExpandTest': { 'pending': {}, @@ -909,13 +949,10 @@ def write(self, filename): '.*expand-manifest#tc038$', '.*expand-manifest#ter54$', '.*expand-manifest#ter56$', - ] + ], }, 'fn': 'expand', - 'params': [ - read_test_url('input'), - create_test_options() - ] + 'params': [read_test_url('input'), create_test_options()], }, 
'jld:FlattenTest': { 'pending': {}, @@ -926,14 +963,14 @@ def write(self, filename): 'idRegex': [ # uncategorized html '.*html-manifest#tf004$', - ] + ], }, 'fn': 'flatten', 'params': [ read_test_url('input'), read_test_property('context'), - create_test_options() - ] + create_test_options(), + ], }, 'jld:FrameTest': { 'pending': {}, @@ -944,14 +981,14 @@ def write(self, filename): 'idRegex': [ # uncategorized '.*frame-manifest#t0069$', - ] + ], }, 'fn': 'frame', 'params': [ read_test_url('input'), read_test_property('frame'), - create_test_options() - ] + create_test_options(), + ], }, 'jld:FromRDFTest': { 'skip': { @@ -962,21 +999,21 @@ def write(self, filename): '.*fromRdf-manifest#tdi12$', # uncategorized '.*fromRdf-manifest#t0027$', - ] + ], }, 'fn': 'from_rdf', 'params': [ read_test_property('input'), - create_test_options({'format': 'application/n-quads'}) - ] + create_test_options({'format': 'application/n-quads'}), + ], }, 'jld:NormalizeTest': { 'skip': {}, 'fn': 'normalize', 'params': [ read_test_property('input'), - create_test_options({'format': 'application/n-quads'}) - ] + create_test_options({'format': 'application/n-quads'}), + ], }, 'jld:ToRDFTest': { 'pending': { @@ -1011,19 +1048,16 @@ def write(self, filename): # node object direction '.*toRdf-manifest#tdi11$', '.*toRdf-manifest#tdi12$', - ] + ], }, 'fn': 'to_rdf', 'params': [ read_test_url('input'), - create_test_options({'format': 'application/n-quads'}) - ] + create_test_options({'format': 'application/n-quads'}), + ], }, 'rdfn:Urgna2012EvalTest': { - 'pending': { - 'idRegex': [ - ] - }, + 'pending': {'idRegex': []}, 'skip': { 'idRegex': [ '.*manifest-urgna2012#test060$', @@ -1032,18 +1066,17 @@ def write(self, filename): 'fn': 'normalize', 'params': [ read_test_property('action'), - create_test_options({ - 'algorithm': 'URGNA2012', - 'inputFormat': 'application/n-quads', - 'format': 'application/n-quads' - }) - ] + create_test_options( + { + 'algorithm': 'URGNA2012', + 'inputFormat': 
'application/n-quads', + 'format': 'application/n-quads', + } + ), + ], }, 'rdfn:Urdna2015EvalTest': { - 'pending': { - 'idRegex': [ - ] - }, + 'pending': {'idRegex': []}, 'skip': { 'idRegex': [ '.*manifest-urdna2015#test060$', @@ -1052,13 +1085,15 @@ def write(self, filename): 'fn': 'normalize', 'params': [ read_test_property('action'), - create_test_options({ - 'algorithm': 'URDNA2015', - 'inputFormat': 'application/n-quads', - 'format': 'application/n-quads' - }) - ] - } + create_test_options( + { + 'algorithm': 'URDNA2015', + 'inputFormat': 'application/n-quads', + 'format': 'application/n-quads', + } + ), + ], + }, } diff --git a/tests/test_iri_resolver.py b/tests/test_iri_resolver.py index eac39de..b7b2d1c 100644 --- a/tests/test_iri_resolver.py +++ b/tests/test_iri_resolver.py @@ -1,9 +1,11 @@ import pytest -from pyld.iri_resolver import resolve, unresolve, remove_dot_segments + +from pyld.iri_resolver import remove_dot_segments, resolve, unresolve # Tests ported from relative-to-absolute-iri.js: https://github.com/rubensworks/relative-to-absolute-iri.js/blob/master/test/Resolve-test.ts # (c) Ruben Taelman + # ---------- Tests for resolve() ---------- class TestResolve: def test_absolute_iri_no_base(self): @@ -13,7 +15,9 @@ def test_absolute_iri_empty_base(self): assert resolve('http://example.org/', '') == 'http://example.org/' def test_absolute_iri_with_base(self): - assert resolve('http://example.org/', 'http://base.org/') == 'http://example.org/' + assert ( + resolve('http://example.org/', 'http://base.org/') == 'http://example.org/' + ) def test_empty_value_uses_base(self): assert resolve('', 'http://base.org/') == 'http://base.org/' @@ -22,7 +26,9 @@ def test_relative_with_scheme_no_base(self): assert resolve('ex:abc') == 'ex:abc' def test_relative_without_scheme_no_base_error(self): - with pytest.raises(ValueError, match=r"Found invalid relative IRI 'abc' for a missing baseIRI"): + with pytest.raises( + ValueError, match=r"Found invalid relative IRI 
'abc' for a missing baseIRI" + ): resolve('abc') def test_relative_without_dot_segments_no_base(self): @@ -44,11 +50,15 @@ def test_colon_in_value_removes_dots(self): assert resolve('http://abc/../../', 'http://base.org/') == 'http://abc/' def test_non_absolute_base_error(self): - with pytest.raises(ValueError, match=r"Found invalid baseIRI 'def' for value 'abc'"): + with pytest.raises( + ValueError, match=r"Found invalid baseIRI 'def' for value 'abc'" + ): resolve('abc', 'def') def test_non_absolute_base_empty_value_error(self): - with pytest.raises(ValueError, match=r"Found invalid baseIRI 'def' for value ''"): + with pytest.raises( + ValueError, match=r"Found invalid baseIRI 'def' for value ''" + ): resolve('', 'def') def test_scheme_from_base_if_value_starts_with_slash_slash(self): @@ -79,7 +89,10 @@ def test_base_only_scheme_dot_segments(self): assert resolve('abc/./', 'http:') == 'http:abc/' def test_absolute_path_ignores_base_path(self): - assert resolve('/abc/def/', 'http://base.org/123/456/') == 'http://base.org/abc/def/' + assert ( + resolve('/abc/def/', 'http://base.org/123/456/') + == 'http://base.org/abc/def/' + ) def test_base_with_last_slash_replacement(self): assert resolve('xyz', 'http://aa/a') == 'http://aa/xyz' @@ -163,7 +176,9 @@ def test_middle_semicolon_relative_with_complex_base(self): assert resolve('g;x', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/g;x' def test_semicolon_questionmark_and_hashtag_relative_with_complex_base(self): - assert resolve('g;x?y#s', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/g;x?y#s' + assert ( + resolve('g;x?y#s', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/g;x?y#s' + ) def test_empty_relative_with_complex_base(self): assert resolve('', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/d;p?q' @@ -176,7 +191,7 @@ def test_dot_slash_relative_with_complex_base(self): def test_double_dot_relative_with_complex_base(self): assert resolve('..', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/' - + def 
test_double_dot_slash_relative_with_complex_base(self): assert resolve('../', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/' @@ -235,46 +250,75 @@ def test_g_slash_double_dot_slash_h_relative_with_complex_base(self): assert resolve('g/../h', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/h' def test_g_semicolon_x_equals_1_slash_dot_slash_y_relative_with_complex_base(self): - assert resolve('g;x=1/./y', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/g;x=1/y' + assert ( + resolve('g;x=1/./y', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/g;x=1/y' + ) - def test_g_semicolon_x_equals_1_slash_double_dot_slash_y_relative_with_complex_base(self): + def test_g_semicolon_x_equals_1_slash_double_dot_slash_y_relative_with_complex_base( + self, + ): assert resolve('g;x=1/../y', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/y' def test_g_questionmark_y_slash_dot_slash_x_relative_with_complex_base(self): - assert resolve('g?y/./x', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/g?y/./x' + assert ( + resolve('g?y/./x', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/g?y/./x' + ) def test_g_questionmark_y_slash_double_dot_slash_x_relative_with_complex_base(self): - assert resolve('g?y/../x', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/g?y/../x' + assert ( + resolve('g?y/../x', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/g?y/../x' + ) def test_g_hash_s_slash_dot_slash_x_relative_with_complex_base(self): - assert resolve('g#s/./x', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/g#s/./x' + assert ( + resolve('g#s/./x', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/g#s/./x' + ) def test_g_hash_s_slash_double_dot_slash_x_relative_with_complex_base(self): - assert resolve('g#s/../x', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/g#s/../x' + assert ( + resolve('g#s/../x', 'file:///a/bb/ccc/d;p?q') == 'file:///a/bb/ccc/g#s/../x' + ) def test_http_colon_g_relative_with_complex_base(self): assert resolve('http:g', 'file:///a/bb/ccc/d;p?q') == 'http:g' def 
test_complex_relative_with_complex_base(self): - assert resolve('//example.org/.././useless/../../scheme-relative', 'http://example.com/some/deep/directory/and/file#with-a-fragment') == 'http://example.org/scheme-relative' + assert ( + resolve( + '//example.org/.././useless/../../scheme-relative', + 'http://example.com/some/deep/directory/and/file#with-a-fragment', + ) + == 'http://example.org/scheme-relative' + ) def test_relative_with_complex_base_without_double_slash_after_scheme(self): assert resolve('a', 'tag:example') == 'tag:a' - def test_relative_with_complex_base_without_double_slash_after_scheme_with_one_slash(self): + def test_relative_with_complex_base_without_double_slash_after_scheme_with_one_slash( + self, + ): assert resolve('a', 'tag:example/foo') == 'tag:example/a' - def test_relative_a_with_base_without_double_slash_after_scheme_with_two_slash(self): + def test_relative_a_with_base_without_double_slash_after_scheme_with_two_slash( + self, + ): assert resolve('a', 'tag:example/foo/') == 'tag:example/foo/a' def test_relative_with_triple_dot_segment_and_double_dot_and_base(self): - assert resolve('../.../../', 'http://example.org/a/b/c/') == 'http://example.org/a/b/' + assert ( + resolve('../.../../', 'http://example.org/a/b/c/') + == 'http://example.org/a/b/' + ) def test_relative_with_triple_dot_segment_and_2x_double_dot_and_base(self): - assert resolve('../.../../../', 'http://example.org/a/b/c/') == 'http://example.org/a/' + assert ( + resolve('../.../../../', 'http://example.org/a/b/c/') + == 'http://example.org/a/' + ) def test_questionmark_prefix_relative_with_complex_base_with_dot(self): - assert resolve('?y','http://a/bb/ccc/./d;p?q') == 'http://a/bb/ccc/./d;p?y' + assert resolve('?y', 'http://a/bb/ccc/./d;p?q') == 'http://a/bb/ccc/./d;p?y' + # ---------- Tests for unresolve() ---------- class TestUnresolve: @@ -285,7 +329,10 @@ def test_absolute_iri_empty_base(self): assert unresolve('http://example.org/', '') == 'http://example.org/' def 
test_absolute_iri_with_base(self): - assert unresolve('http://example.org/', 'http://base.org/') == 'http://example.org/' + assert ( + unresolve('http://example.org/', 'http://base.org/') + == 'http://example.org/' + ) def test_empty_value_uses_base(self): assert unresolve('', 'http://base.org/') == '' @@ -303,11 +350,16 @@ def test_colon_in_value_ignores_base(self): assert unresolve('http:abc', 'http://base.org/') == 'http:abc' def test_non_absolute_base_error(self): - with pytest.raises(ValueError, match=r"Found invalid baseIRI 'def' for value 'http://base.org/abc'"): + with pytest.raises( + ValueError, + match=r"Found invalid baseIRI 'def' for value 'http://base.org/abc'", + ): unresolve('http://base.org/abc', 'def') def test_non_absolute_base_empty_value_error(self): - with pytest.raises(ValueError, match=r"Found invalid baseIRI 'def' for value ''"): + with pytest.raises( + ValueError, match=r"Found invalid baseIRI 'def' for value ''" + ): unresolve('', 'def') def test_base_without_path_slash(self): @@ -319,6 +371,7 @@ def test_base_with_path_slash(self): def test_absolute_iri_with_keyword(self): assert unresolve('http://base.org/@abc', 'http://base.org/') == './@abc' + # ---------- Tests for remove_dot_segments() ---------- class TestRemoveDotSegments: def test_no_slash(self): @@ -439,10 +492,17 @@ def test_triple_dots_as_normal_segment_followed_by_double_dots(self): assert remove_dot_segments('/invalid/.../..') == '/invalid/' def test_four_dots_as_normal_segment(self): - assert remove_dot_segments('/invalid/../..../../../.../.htaccess') == '/.../.htaccess' + assert ( + remove_dot_segments('/invalid/../..../../../.../.htaccess') + == '/.../.htaccess' + ) def test_segment_with_dot_and_invalid_char_as_normal_segment(self): - assert remove_dot_segments('/invalid/../.a/../../.../.htaccess') == '/.../.htaccess' + assert ( + remove_dot_segments('/invalid/../.a/../../.../.htaccess') + == '/.../.htaccess' + ) + if __name__ == "__main__": pytest.main(["-v", __file__]) 
diff --git a/tests/test_jsonld.py b/tests/test_jsonld.py index c9749b1..a12016f 100644 --- a/tests/test_jsonld.py +++ b/tests/test_jsonld.py @@ -315,7 +315,7 @@ def fake_loader(url, options): "contentType": "application/json+ld", } else: - raise Exception("Unknown URL: {}".format(url)) + raise Exception(f"Unknown URL: {url}") options = {"documentLoader": fake_loader, "omitGraph": False} return jsonld.frame(input, "http://example.com/frame.json", options=options) diff --git a/tests/test_manifests.py b/tests/test_manifests.py index a72b84b..de3380a 100644 --- a/tests/test_manifests.py +++ b/tests/test_manifests.py @@ -1,6 +1,7 @@ -import pytest import unittest +import pytest + # Reuse the existing Test wrapper from `runtests.py`. The pytest test # simply calls `setUp()` and `runTest()` on the original Test instance # so that all existing behavior and comparison logic remains unchanged.